Compare commits


432 Commits

Author SHA1 Message Date
Erik Johnston
88a973cde5 Merge branch 'release-v0.13.2' of github.com:matrix-org/synapse 2016-02-11 10:55:16 +00:00
Erik Johnston
1a830b751d Bump version and changelog 2016-02-11 10:53:42 +00:00
Erik Johnston
abc1b22193 Merge pull request #570 from matrix-org/erikj/events_fixes
Return events in correct order for /events
2016-02-11 10:13:26 +00:00
Erik Johnston
0eff740523 Return events in correct order for /events 2016-02-11 10:07:27 +00:00
Erik Johnston
a1b7902944 Add some paranoia logging 2016-02-11 09:22:37 +00:00
Erik Johnston
7718303e71 Merge branch 'master' of github.com:matrix-org/synapse into develop 2016-02-10 17:06:53 +00:00
Matthew Hodgson
103b432c84 0.13.1 2016-02-10 16:35:17 +00:00
Matthew Hodgson
a45cc801d2 bump for 0.13.1 2016-02-10 16:34:46 +00:00
Matthew Hodgson
7634687057 Merge branch 'master' of git+ssh://github.com/matrix-org/synapse 2016-02-10 16:27:15 +00:00
Matthew Hodgson
b3ecb96e36 try to bump syweb to 0.6.8 2016-02-10 16:27:12 +00:00
Erik Johnston
907c1faf1e Merge branch 'release-v0.13.0' of github.com:matrix-org/synapse 2016-02-10 14:52:06 +00:00
Erik Johnston
6c3126d950 Update CHANGES 2016-02-10 14:49:48 +00:00
Erik Johnston
6e89e69d08 Bump version and changelog 2016-02-10 14:36:06 +00:00
Erik Johnston
e66d0bd03a Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.13.0 2016-02-10 14:12:48 +00:00
Erik Johnston
4a2ace1857 Merge pull request #569 from matrix-org/erikj/initial_sync
Improvements to initial /syncs
2016-02-10 13:43:15 +00:00
Erik Johnston
5189bfdef4 Batch fetch _get_state_groups_from_groups 2016-02-10 13:24:42 +00:00
Erik Johnston
24f00a6c33 Use _simple_select_many for _get_state_group_for_events 2016-02-10 12:57:50 +00:00
Erik Johnston
8e49892b21 Only calculate initial sync for 10 rooms at a time
This helps to ensure we don't completely starve other requests.
2016-02-10 11:42:07 +00:00
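
For illustration, a minimal sketch of the batching idea described above (hypothetical helper, not the actual sync handler):

    # Hypothetical helper, not the actual sync handler: process rooms in
    # groups of 10 so long-running initial syncs don't starve other requests.
    def batch(rooms, size=10):
        for i in range(0, len(rooms), size):
            yield rooms[i:i + size]

    # e.g. for group in batch(joined_room_ids): handle_initial_sync_for(group)
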
Erik Johnston
e557dc80b8 Merge pull request #566 from matrix-org/erikj/logcontext
Don't bother copying records on parent context
2016-02-10 11:41:45 +00:00
Erik Johnston
4eb8f9ca8a Remove comment 2016-02-10 11:29:21 +00:00
Erik Johnston
f7ef5c1d57 Merge pull request #568 from matrix-org/erikj/unread_notif
Atomically persist push actions when we persist the event
2016-02-10 11:25:32 +00:00
Erik Johnston
00c9ad49df s/parent_context/previous_context/ 2016-02-10 11:25:19 +00:00
Erik Johnston
9777c5f49a Set parent context on instantiation 2016-02-10 11:23:32 +00:00
Erik Johnston
0214745239 Rename functions 2016-02-10 11:09:56 +00:00
Erik Johnston
46a02ff15b Merge pull request #532 from floviolleau/floviolleau/documentation
Update documentation
2016-02-10 10:40:28 +00:00
Erik Johnston
6ad9586c84 Merge pull request #565 from matrix-org/erikj/macaroon_config
Derive macaroon_secret_key from signing key.
2016-02-09 16:34:15 +00:00
Erik Johnston
78a5482267 Typo 2016-02-09 16:23:11 +00:00
Erik Johnston
7b0d846407 Atomically persist push actions when we persist the event 2016-02-09 16:19:15 +00:00
Erik Johnston
f28cc45183 Pass in current state to push action handler 2016-02-09 16:01:40 +00:00
Erik Johnston
e664e9737c Fix test 2016-02-09 14:57:43 +00:00
Erik Johnston
13ba8d878c Fix test 2016-02-09 14:55:21 +00:00
Erik Johnston
78d6c1b5be Change a log from debug to info 2016-02-09 14:44:12 +00:00
Erik Johnston
feb294d552 Remove dead code 2016-02-09 14:32:17 +00:00
Erik Johnston
70a8608749 Invalidate get_last_receipt_event_id_for_user cache 2016-02-09 14:27:29 +00:00
Erik Johnston
7e3b586c1e Merge pull request #567 from matrix-org/erikj/sync_ephemeral
Don't load all ephemeral state for a room on every sync
2016-02-09 14:11:47 +00:00
Erik Johnston
eff12e838c Don't load all ephemeral state for a room on every sync 2016-02-09 13:55:59 +00:00
Erik Johnston
82631c5f94 Fix unit tests 2016-02-09 13:50:37 +00:00
Daniel Wagner-Hall
b58a8b1ee0 Merge pull request #560 from matrix-org/daniel/tox
Remove pyc files before running tests
2016-02-09 13:44:08 +00:00
Daniel Wagner-Hall
5b75b637b8 Remove pyc files before running tests 2016-02-09 13:44:04 +00:00
Erik Johnston
9ac9b75bc4 Merge branch 'develop' of github.com:matrix-org/synapse into develop 2016-02-09 12:58:10 +00:00
Daniel Wagner-Hall
ebaa999f92 Revert "Reject additional path segments"
This reverts commit 1d19a5ec0f.

iOS Console is apparently relying on these paths.
2016-02-09 12:46:52 +00:00
Erik Johnston
6c558ee8bc Measure some /sync related things 2016-02-09 11:31:42 +00:00
Erik Johnston
31a2b892d8 Revert to putting it around the entire block 2016-02-09 11:25:09 +00:00
Erik Johnston
9daa4e2a85 Don't create new logging context 2016-02-09 11:06:19 +00:00
Erik Johnston
3e2fcd67b2 Don't bother copying records on parent context 2016-02-09 10:50:31 +00:00
Erik Johnston
241b71852e Fix bug in util.metrics.Measure 2016-02-09 10:28:13 +00:00
Erik Johnston
97294ef2fd Create new context when measuring 2016-02-09 10:12:00 +00:00
Erik Johnston
549698b1e0 Don't measure across event stream call, as it lasts for a long time. 2016-02-09 09:37:09 +00:00
Erik Johnston
c486b7b41c Change logcontext warns to debug 2016-02-09 09:20:38 +00:00
Erik Johnston
f078ecbc8f Derive macaroon_secret_key from signing key.
Unfortunately, there are people that are running synapse without a
`macaroon_secret_key` set. Mandating that they set one is a good solution,
except that breaking auto upgrades is annoying.
2016-02-08 16:35:44 +00:00
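
For context, a rough sketch of the derivation idea described in the commit above, assuming an HMAC-style construction (not Synapse's actual code):

    # Hypothetical sketch, not Synapse's actual implementation: derive a
    # stable secret from existing signing-key material so deployments without
    # an explicit macaroon_secret_key keep working across restarts/upgrades.
    import hashlib
    import hmac

    def derive_macaroon_secret(signing_key_seed):
        # A fixed label keeps the derived value deterministic but distinct
        # from the signing key itself.
        return hmac.new(signing_key_seed, b"macaroon_secret_key", hashlib.sha256).digest()

    print(derive_macaroon_secret(b"example signing key seed").hex())
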
Erik Johnston
2bb5f035af Merge pull request #564 from matrix-org/erikj/logcontext
Fix up logcontexts
2016-02-08 15:16:16 +00:00
Erik Johnston
cca5c06679 Merge pull request #562 from matrix-org/erikj/push_metric
Add metrics to pushers
2016-02-08 14:57:40 +00:00
Daniel Wagner-Hall
0897357993 Merge pull request #563 from matrix-org/daniel/dollarz
Reject additional path segments
2016-02-08 14:46:37 +00:00
Erik Johnston
2c1fbea531 Fix up logcontexts 2016-02-08 14:26:45 +00:00
Erik Johnston
13e6262659 Add metrics to pushers 2016-02-08 14:26:45 +00:00
Daniel Wagner-Hall
1d19a5ec0f Reject additional path segments 2016-02-08 10:50:55 +00:00
Mark Haines
77c7ed0e93 Report the v1 and v2 patterns separately 2016-02-05 15:43:27 +00:00
Mark Haines
b052621f67 List the URL patterns in synapse 2016-02-05 15:18:58 +00:00
Daniel Wagner-Hall
8f1031586f Merge pull request #550 from matrix-org/daniel/guestnames
Allocate guest user IDs numerically

The current random IDs are ugly and confusing when presented in UIs.
This makes them prettier and easier to read.

Also, disable non-automated registration of numeric IDs so that we don't
need to worry so much about people carving out our automated address
space and us needing to keep retrying ID registration.
2016-02-05 11:22:42 +00:00
Daniel Wagner-Hall
79a1c0574b Allocate guest user IDs numerically
The current random IDs are ugly and confusing when presented in UIs.
This makes them prettier and easier to read.

Also, disable non-automated registration of numeric IDs so that we don't
need to worry so much about people carving out our automated address
space and us needing to keep retrying ID registration.
2016-02-05 11:22:30 +00:00
Daniel Wagner-Hall
489f92e0e5 Merge pull request #559 from matrix-org/daniel/media
Host /media/r0 as well as /media/v1
2016-02-05 11:04:24 +00:00
Daniel Wagner-Hall
737c4223ef Host /media/r0 as well as /media/v1 2016-02-05 10:47:46 +00:00
Daniel Wagner-Hall
db0da033eb Merge pull request #558 from matrix-org/daniel/config
Error if macaroon key is missing from config

Currently we store all access tokens in the DB, and fall back to that
check if we can't validate the macaroon, so our fallback works here, but
for guests, their macaroons don't get persisted, so we don't get to
find them in the database. Each restart, we generate a new ephemeral
key, so guests lose access after each server restart.

I tried to fix up the config stuff to be less insane, but gave up, so
instead I bolt on yet another piece of custom one-off insanity.

Also, add some basic tests for config generation and loading.
2016-02-05 01:58:36 +00:00
Daniel Wagner-Hall
6a9f1209df Error if macaroon key is missing from config
Currently we store all access tokens in the DB, and fall back to that
check if we can't validate the macaroon, so our fallback works here, but
for guests, their macaroons don't get persisted, so we don't get to
find them in the database. Each restart, we generate a new ephemeral
key, so guests lose access after each server restart.

I tried to fix up the config stuff to be less insane, but gave up, so
instead I bolt on yet another piece of custom one-off insanity.

Also, add some basic tests for config generation and loading.
2016-02-05 01:58:23 +00:00
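
A minimal sketch of the config check described above (names are illustrative, not Synapse's actual config classes):

    # Illustrative config check only; not the real Synapse config code.
    class ConfigError(Exception):
        pass

    def check_macaroon_secret_key(config):
        key = config.get("macaroon_secret_key")
        if not key:
            # Guest macaroons are never persisted to the database, so without
            # a stable key they become unverifiable after every restart.
            raise ConfigError("macaroon_secret_key must be set in the config")
        return key
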
David Baker
34dda7cc7f Merge pull request #557 from matrix-org/dbkr/profile_dont_return_null
Omit keys rather than returning null in profile API
2016-02-04 15:39:12 +00:00
Erik Johnston
4d36e73230 Actually return something sensible 2016-02-03 16:35:00 +00:00
Erik Johnston
709e09e1c3 Remove old log line 2016-02-03 16:32:20 +00:00
Erik Johnston
aa4af94c69 We return dicts now. 2016-02-03 16:29:32 +00:00
Erik Johnston
b84d59c5f0 Add descriptions 2016-02-03 16:22:35 +00:00
Erik Johnston
33c71c3a4b Preserve log context when deferring to the thread pool in media repo 2016-02-03 16:17:18 +00:00
Erik Johnston
c8e4d5de7f Merge pull request #555 from matrix-org/erikj/logcontext
Allow tagging log contexts
2016-02-03 15:20:00 +00:00
David Baker
156cea5b45 No braces here 2016-02-03 15:04:51 +00:00
Erik Johnston
8450114098 Merge pull request #554 from matrix-org/erikj/event_push
Change event_push_actions_rm_tokens schema
2016-02-03 15:02:47 +00:00
David Baker
24277fbb97 Don't return null if profile display name / avatar url isn't set: omit them instead 2016-02-03 14:59:19 +00:00
Daniel Wagner-Hall
66bb255fcd Merge pull request #556 from matrix-org/daniel/config
Rename config field to reflect yaml name
2016-02-03 14:55:54 +00:00
Daniel Wagner-Hall
5054806ec1 Rename config field to reflect yaml name 2016-02-03 14:42:01 +00:00
Erik Johnston
430e496050 Merge pull request #552 from matrix-org/erikj/public_room_fix
Change the way we do public room list fetching
2016-02-03 13:53:59 +00:00
Erik Johnston
d4f72a5bfb Allow tagging log contexts 2016-02-03 13:52:27 +00:00
Erik Johnston
f8aae79a72 Simplify get_rooms 2016-02-03 13:24:35 +00:00
Erik Johnston
9cd80a7b5c PEP8 2016-02-03 11:52:57 +00:00
Erik Johnston
772b45c745 Remove unused method 2016-02-03 11:43:26 +00:00
Daniel Wagner-Hall
5f280837a6 Add macaroon inspection script 2016-02-03 11:27:39 +00:00
Erik Johnston
6f52e90065 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/public_room_fix 2016-02-03 11:06:29 +00:00
Erik Johnston
771528ab13 Change event_push_actions_rm_tokens schema 2016-02-03 10:50:49 +00:00
Erik Johnston
b32121a5d1 Unused import 2016-02-03 10:30:56 +00:00
Daniel Wagner-Hall
2e36689df3 Merge pull request #553 from matrix-org/daniel/accesstokenlogging
Log more diagnostics for unrecognised access tokens
2016-02-02 19:22:43 +00:00
Daniel Wagner-Hall
2df6114bc4 Log more diagnostics for unrecognised access tokens 2016-02-02 19:21:49 +00:00
Daniel Wagner-Hall
a644ac6d2c Explain what W503 is 2016-02-02 17:23:12 +00:00
Daniel Wagner-Hall
de11b5b9b5 Merge pull request #551 from matrix-org/daniel/flake8
Fix flake8 warnings for new flake8
2016-02-02 17:18:54 +00:00
Daniel Wagner-Hall
d83d004ccd Fix flake8 warnings for new flake8 2016-02-02 17:18:50 +00:00
Erik Johnston
43e13dbd4d Merge pull request #549 from matrix-org/erikj/sync
Fetch events in a separate transaction.
2016-02-02 16:23:54 +00:00
Erik Johnston
8a391e33ae s/get_room_changes_for_user/get_membership_changes_for_user/ 2016-02-02 16:12:10 +00:00
Erik Johnston
477b1ed6cf Fetch events in a separate transaction.
This has a couple of benefits:

- It reduces the time of transactions, allowing other database requests
  to run.
- Fetching events is given a dedicated database thread, and so can't
  starve other database requests.
2016-02-02 15:58:14 +00:00
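
An illustrative sketch of the split described above, using a dedicated worker for event fetches (not the actual Synapse storage layer):

    # Illustrative only: give event fetching its own single-threaded pool and
    # its own (short) transaction so it cannot tie up the general DB pool.
    from concurrent.futures import ThreadPoolExecutor

    general_db_pool = ThreadPoolExecutor(max_workers=10)   # ordinary queries
    event_fetch_pool = ThreadPoolExecutor(max_workers=1)   # event fetches only

    def get_events(event_ids, fetch_events_txn):
        # fetch_events_txn is assumed to open and commit its own transaction;
        # the caller's transaction has already finished, so both stay short.
        return event_fetch_pool.submit(fetch_events_txn, event_ids).result()
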
Erik Johnston
04ad93e6fd Merge pull request #545 from matrix-org/erikj/sync
Move /sync state calculations from rest to handler
2016-02-02 15:28:43 +00:00
Erik Johnston
65e92eca49 Change the way we do public room list fetching 2016-02-02 15:21:10 +00:00
David Baker
9039904f4c Merge pull request #548 from matrix-org/dbkr/fix_guest_db_column
Pass make_guest when we autogen a user ID
2016-02-02 14:46:25 +00:00
David Baker
69214ea671 Pass make_guest when we autogen a user ID 2016-02-02 14:42:31 +00:00
Erik Johnston
b023995538 WARN if we get a topo token instead of stream. 2016-02-02 14:11:14 +00:00
David Baker
793369791a Merge pull request #547 from matrix-org/dbkr/fix_guest_upgrade
Pull guest access token out of the auth session params
2016-02-02 09:47:30 +00:00
Erik Johnston
7a8ea7e78b Merge pull request #546 from matrix-org/erikj/events
Switch over /events to use per room caches
2016-02-01 17:23:44 +00:00
Erik Johnston
854ca32f10 Comments 2016-02-01 16:52:27 +00:00
David Baker
d7ac861d3b Pull guest access token out of the auth session params, otherwise it will break if you open the email on a different device. 2016-02-01 16:33:19 +00:00
Erik Johnston
89b40b225c Order things correctly 2016-02-01 16:32:46 +00:00
Erik Johnston
4bf448be25 Switch over /events to use per room caches 2016-02-01 16:26:51 +00:00
Erik Johnston
fa48020a52 Move state calculations from rest to handler 2016-02-01 15:59:40 +00:00
Erik Johnston
1ef7cae41b Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-02-01 15:59:12 +00:00
Erik Johnston
2d3837bec7 Merge pull request #543 from matrix-org/erikj/sync
Cache get_room_changes_for_user
2016-02-01 15:05:06 +00:00
Erik Johnston
498c2e60fd Merge pull request #544 from matrix-org/erikj/stream_rooms
Only use room_ids in get_room_events_stream if is_guest
2016-02-01 15:04:57 +00:00
Erik Johnston
ceb6b8680a Only use room_ids in get_room_events_stream if is_guest 2016-02-01 10:33:52 +00:00
Erik Johnston
b264b9548f Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-02-01 09:34:17 +00:00
Erik Johnston
d98a9f2583 Don't use before_token. It's wrong. Use actual limit. 2016-01-31 13:31:15 +00:00
Erik Johnston
226a9a5fa6 Merge pull request #542 from matrix-org/erikj/cache_fix
Cache fixes
2016-01-29 16:54:34 +00:00
Erik Johnston
25c311eaf6 Cache get_room_changes_for_user 2016-01-29 16:52:48 +00:00
Erik Johnston
cc9c97e0dc Invalidate _account_data_stream_cache correctly 2016-01-29 16:41:51 +00:00
Erik Johnston
e70165039c If stream pos is greater than the earliest known key and entity hasn't changed, then entity hasn't changed 2016-01-29 16:41:32 +00:00
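
An illustrative sketch of the cache semantics described in the commit message (hypothetical class, not the real StreamChangeCache):

    # Hypothetical class: the cache can only answer definitively for stream
    # positions newer than the earliest key it knows about.
    class StreamChangeCache:
        def __init__(self, earliest_known_stream_pos):
            self.earliest_known_stream_pos = earliest_known_stream_pos
            self.changed_entities = {}  # entity -> stream pos of last change

        def has_entity_changed(self, entity, stream_pos):
            if stream_pos < self.earliest_known_stream_pos:
                return True  # too old to know, so assume it changed
            last_change = self.changed_entities.get(entity)
            return last_change is not None and last_change > stream_pos
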
Erik Johnston
c1de91aca4 Merge pull request #540 from matrix-org/erikj/sync
Prefill stream change caches
2016-01-29 15:52:42 +00:00
Erik Johnston
b55b90bfb4 Merge pull request #541 from matrix-org/erikj/fixsomeofpush
Make /events always return a newer token, if one exists
2016-01-29 15:45:35 +00:00
Erik Johnston
8da95b6f1b Comment. Remove superfluous order by 2016-01-29 15:39:17 +00:00
Mark Haines
b91baae09d Merge pull request #539 from matrix-org/markjh/3pid
Fix up the /account/3pid API
2016-01-29 16:34:13 +01:00
Erik Johnston
13724569ec Deal with None limit 2016-01-29 15:33:44 +00:00
Erik Johnston
4a6eb5eb45 Make /events always return a newer token, if one exists 2016-01-29 15:22:17 +00:00
Mark Haines
6927d0e091 Add missing param to the log line 2016-01-29 15:01:26 +00:00
Erik Johnston
b5dbced938 Don't prefill account data 2016-01-29 14:53:59 +00:00
Mark Haines
f2d5ff5bf2 Fix the mock homeserver used in the tests 2016-01-29 14:53:14 +00:00
Erik Johnston
3d60686c0c Actually use cache 2016-01-29 14:49:11 +00:00
Erik Johnston
45488e0ffa Max is not a function 2016-01-29 14:42:01 +00:00
Erik Johnston
f67d60496a Convert param style 2016-01-29 14:41:16 +00:00
Erik Johnston
18579534ea Prefill stream change caches 2016-01-29 14:37:59 +00:00
Mark Haines
47374a33fc Merge remote-tracking branch 'origin/develop' into markjh/3pid 2016-01-29 14:15:12 +00:00
Mark Haines
0fcafbece8 Add config option for setting the trusted id servers, disabling checking the ID server in integration tests 2016-01-29 14:12:26 +00:00
Erik Johnston
96bb4bf38a Update CHANGES 2016-01-29 13:56:22 +00:00
Erik Johnston
fd142c29d9 Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.12.1 2016-01-29 13:52:12 +00:00
Erik Johnston
ebc5f00efe Bump AccountDataAndTagsChangeCache size 2016-01-29 13:37:40 +00:00
Erik Johnston
ea320d3464 Don't work out unread_notifs_for_room_id unless needed 2016-01-29 13:34:48 +00:00
Mark Haines
5687a00e4e Allow three_pid_creds as well as threePidCreds in /account/3pid 2016-01-29 13:26:15 +00:00
Erik Johnston
b18114e19e Merge pull request #536 from matrix-org/erikj/sync
Make /sync "better".
2016-01-29 13:04:51 +00:00
Erik Johnston
02a9c3be6c Merge pull request #538 from matrix-org/erikj/fix_lru_cache
Fix LruCache. Make TreeCache track its own size.
2016-01-29 11:53:55 +00:00
Erik Johnston
4fce59f274 Add tests 2016-01-29 11:33:11 +00:00
Erik Johnston
fb7299800f Directly set self.value 2016-01-29 11:29:14 +00:00
Erik Johnston
c046630c33 Remove spurious self.size 2016-01-29 11:17:54 +00:00
Erik Johnston
a30364c1f9 Correctly bookkeep the size of TreeCache 2016-01-29 10:44:46 +00:00
Erik Johnston
766526e114 Make TreeCache keep track of its own size. 2016-01-29 10:11:21 +00:00
Erik Johnston
50e18938a9 Reset size on clear 2016-01-29 10:00:45 +00:00
Erik Johnston
f3af1840cb Merge pull request #537 from matrix-org/erikj/cache_filters
Cache user filters.
2016-01-28 18:26:16 +00:00
Erik Johnston
467c27a1f9 Amalgamate tags and account data stream caches 2016-01-28 18:20:00 +00:00
Erik Johnston
3f5dd18bd4 If the same as the earliest key, assume nothing has changed. 2016-01-28 18:11:41 +00:00
Erik Johnston
40431251cb Correctly update _entity_to_key 2016-01-28 18:05:43 +00:00
Erik Johnston
82cf3a8043 Fix inequalities 2016-01-28 17:44:04 +00:00
Erik Johnston
03b2c2577c Don't use defer.returnValue 2016-01-28 17:29:24 +00:00
Erik Johnston
0663c5bd52 Include cache hits with has_entity_changed 2016-01-28 17:27:28 +00:00
Erik Johnston
35981c8b71 Fix test 2016-01-28 17:20:05 +00:00
Erik Johnston
8fe8951a8d Cache filters 2016-01-28 17:09:09 +00:00
Erik Johnston
fdca8ec418 Add events index 2016-01-28 16:41:59 +00:00
Erik Johnston
45cf827c8f Change name and doc has_entity_changed 2016-01-28 16:39:18 +00:00
Erik Johnston
00cb3eb24b Cache tags and account data 2016-01-28 16:37:41 +00:00
Erik Johnston
c23a8c7833 Ensure keys to RoomStreamChangeCache are ints 2016-01-28 15:55:26 +00:00
Erik Johnston
e1941442d4 Invalidate caches properly. Remove unused arg 2016-01-28 15:02:41 +00:00
Daniel Wagner-Hall
49c328a892 Prune on fetching
So we don't try to checkout a stale ref
2016-01-28 14:57:40 +00:00
Daniel Wagner-Hall
0935802f1e Pin pynacl to 0.3.0
Something has gone wrong in the packaging of 1.* which causes it not to
compile.
2016-01-28 14:47:03 +00:00
Erik Johnston
19fd425928 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-28 14:46:08 +00:00
Erik Johnston
167d1df699 Merge pull request #534 from matrix-org/erikj/setup
Add a Homeserver.setup method
2016-01-28 14:45:36 +00:00
Erik Johnston
7ed2bbeb11 Clean up a bit. Add comment 2016-01-28 14:32:05 +00:00
Daniel Wagner-Hall
9101193242 Quot all the things 2016-01-28 14:18:45 +00:00
Erik Johnston
571a566399 Change load limit params 2016-01-28 14:11:16 +00:00
Erik Johnston
3c6518ddbf Amalgamate incremental and full sync for user 2016-01-28 14:03:48 +00:00
Erik Johnston
4e7948b47a Allow paginating backwards from stream token 2016-01-28 11:52:34 +00:00
Erik Johnston
ba8931829b Return correct type of token 2016-01-28 11:34:17 +00:00
Erik Johnston
61eaa6ec64 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-28 10:50:57 +00:00
Erik Johnston
c5e7c0e436 Up get_rooms_for_user cache size 2016-01-28 09:58:45 +00:00
Richard van der Hoff
e26390ca46 Merge pull request #535 from matrix-org/rav/paginate_from_stream_token
Make it possible to paginate forwards from stream tokens
2016-01-28 09:49:46 +00:00
Erik Johnston
a6477d5933 Remove chdir 2016-01-28 09:19:55 +00:00
Richard van der Hoff
5cba88ea7c Make it possible to paginate forwards from stream tokens
In order that we can fill the gap after a /sync, make it possible to paginate
forwards from a stream token.
2016-01-27 17:42:45 +00:00
Erik Johnston
5fc9b17518 No chdir 2016-01-27 17:39:20 +00:00
Erik Johnston
fa90c180ee Merge branch 'develop' of github.com:matrix-org/synapse into erikj/setup 2016-01-27 17:37:33 +00:00
Erik Johnston
5610880003 Merge pull request #530 from matrix-org/erikj/server_refactor
Remove redundant BaseHomeServer
2016-01-27 17:36:31 +00:00
Erik Johnston
e7febf4fbb PEP8 2016-01-27 17:33:27 +00:00
Erik Johnston
aca3193efb Use the same path for incremental with gap or without gap 2016-01-27 17:33:27 +00:00
Erik Johnston
b97f6626b6 Add cache to room stream 2016-01-27 17:33:26 +00:00
Erik Johnston
f93ecf8783 Don't turn on profiling 2016-01-27 17:33:26 +00:00
Erik Johnston
0487c9441f Fix tests 2016-01-27 17:33:13 +00:00
Erik Johnston
a955cbfa49 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/setup 2016-01-27 17:22:35 +00:00
Richard van der Hoff
8c97b49886 Merge pull request #533 from matrix-org/rav/hashtest_federation
Fix federation for #test:matrix.org
2016-01-27 17:19:36 +00:00
Erik Johnston
2152b320c5 PEP 8 2016-01-27 17:09:17 +00:00
Richard van der Hoff
d6d60b4d6c Federation: drop events which cause SynapseErrors
... rather than rejecting any attempt to federate channels which contain such
events.
2016-01-27 17:02:10 +00:00
Mark Haines
d6c831bd3d Merge pull request #531 from matrix-org/markjh/relative_push_rules
Fix adding push rules relative to other rules
2016-01-27 17:13:36 +01:00
Florent VIOLLEAU
97b364cb25 Update documentation
Signed-off-by: Florent VIOLLEAU <floviolleau@gmail.com>
2016-01-27 14:11:05 +01:00
Erik Johnston
03f4569dc3 Bump version and changelog 2016-01-27 10:35:30 +00:00
Mark Haines
8c94833b72 Fix adding push rules relative to other rules 2016-01-27 10:24:20 +00:00
Erik Johnston
9fda8b5193 Don't turn on profiling 2016-01-26 18:27:23 +00:00
Erik Johnston
e4e33c743e Merge branch 'develop' of github.com:matrix-org/synapse into erikj/server_refactor 2016-01-26 17:30:00 +00:00
Erik Johnston
87f9477b10 Add a Homeserver.setup method.
This is for setting up dependencies that require work on startup. This
is useful for the DataStore that wants to read a bunch from the database
before initializing.
2016-01-26 15:51:06 +00:00
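
A rough sketch of the pattern (class and method bodies are illustrative, not the real code):

    # Illustrative names only: keep __init__ cheap and do the expensive
    # start-up database reads in an explicit setup() step.
    class DataStore:
        def __init__(self, db_conn):
            self.db_conn = db_conn
            # e.g. read min/max stream ids and prefill caches here

    class HomeServer:
        def __init__(self, db_conn):
            self.db_conn = db_conn
            self.datastore = None

        def setup(self):
            # Dependencies that must read from the database before the server
            # starts serving traffic are constructed here, not in __init__.
            self.datastore = DataStore(self.db_conn)
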
Erik Johnston
9959d9ece8 Remove redundant BaseHomeServer 2016-01-26 13:52:29 +00:00
David Baker
27b9775073 Merge pull request #529 from matrix-org/dbkr/one_to_one_only_messages
Only notify for messages in one to one rooms, not every event
2016-01-26 11:50:26 +00:00
David Baker
766c24b2e6 Only notify for messages in one to one rooms, not every event
Fixes the fact that candidate events and hangups generated notifications.
2016-01-26 10:21:41 +00:00
Mark Haines
7179fdd550 Merge pull request #528 from matrix-org/markjh/missing_yield
Add missing yield in push_rules set enabled
2016-01-25 21:26:30 +01:00
Erik Johnston
c887c4cbd5 Merge pull request #524 from matrix-org/erikj/sync
Move some sync logic from rest to handlers package
2016-01-25 16:58:39 +00:00
Mark Haines
e18257f0e5 Add missing yield in push_rules set enabled 2016-01-25 16:51:56 +00:00
Erik Johnston
8431f62ebb Merge pull request #525 from matrix-org/erikj/select_many
Implement a `_simple_select_many_batch`
2016-01-25 16:30:36 +00:00
Erik Johnston
f091b73e69 Merge pull request #527 from matrix-org/erikj/push_cache
Push: Use storage apis that are cached
2016-01-25 16:16:34 +00:00
Erik Johnston
ce6fbbea94 Merge pull request #526 from matrix-org/erikj/push_index
Add index to event_push_actions
2016-01-25 16:06:07 +00:00
Erik Johnston
aea5da0ef6 Guard against empty iterables 2016-01-25 15:59:29 +00:00
Erik Johnston
3a75159832 Merge pull request #521 from matrix-org/erikj/underscores
Underscores are allowed in user ids
2016-01-25 15:56:31 +00:00
Erik Johnston
1ebf5e3d03 Correct docstring 2016-01-25 15:53:36 +00:00
Erik Johnston
dc2647cd3d PEP8 2016-01-25 15:48:54 +00:00
Erik Johnston
86896408b0 Add index to event_push_actions 2016-01-25 15:30:32 +00:00
Erik Johnston
53cb173663 Push: Use storage apis that are cached 2016-01-25 13:55:18 +00:00
Erik Johnston
d59c58bc95 Remove weird stuff 2016-01-25 13:38:53 +00:00
Erik Johnston
ddd25def01 Implement a _simple_select_many_batch 2016-01-25 13:36:02 +00:00
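
A hypothetical sketch of the batching idea (not the real storage helper):

    # Hypothetical helper: split a large "WHERE col IN (...)" query into
    # fixed-size chunks to keep individual statements small.
    def select_many_batch(execute, table, column, values, batch_size=100):
        rows = []
        for i in range(0, len(values), batch_size):
            chunk = values[i:i + batch_size]
            placeholders = ", ".join("?" for _ in chunk)
            # table/column are trusted identifiers here, not user input
            sql = "SELECT * FROM %s WHERE %s IN (%s)" % (table, column, placeholders)
            rows.extend(execute(sql, chunk))
        return rows
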
Erik Johnston
8c6012a4af Fix tests 2016-01-25 13:12:35 +00:00
Erik Johnston
42deca50c2 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-25 12:49:45 +00:00
Erik Johnston
d685ae73b4 Merge branch 'erikj/filters' of github.com:matrix-org/synapse into develop 2016-01-25 12:35:25 +00:00
Erik Johnston
4021f95261 Move logic from rest/ to handlers/ 2016-01-25 10:10:44 +00:00
David Baker
f92fe15897 Merge pull request #523 from matrix-org/dbkr/no_push_unless_notify
Better fix for actions with both dont_notify and tweaks
2016-01-22 17:27:25 +00:00
David Baker
3fe8c56736 Better fix for actions with both dont_notify and tweaks 2016-01-22 17:21:58 +00:00
David Baker
60965bd7e5 Revert b4a41aa542 as it's just broken. 2016-01-22 17:21:15 +00:00
David Baker
0e0e441b33 Merge pull request #522 from matrix-org/dbkr/no_push_unless_notify
Don't add notifications to the table unless there's actually a 'notify' action
2016-01-22 17:06:52 +00:00
David Baker
b4a41aa542 Don't add notifications to the table unless there's actually a 'notify' action 2016-01-22 16:56:48 +00:00
Erik Johnston
db6e26bb8c Don't mutate cached values 2016-01-22 16:03:55 +00:00
Erik Johnston
88baa3865e Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-22 15:57:12 +00:00
David Baker
74f49f99f9 Merge pull request #520 from matrix-org/dbkr/bulk_push_overlay_enabled
Overlay the push_rules_enabled map for users
2016-01-22 15:25:52 +00:00
David Baker
7065b75bfd Don't crash if a user has no push rule enabled entries 2016-01-22 15:13:44 +00:00
Erik Johnston
7959e8b764 Underscores are allowed in user ids 2016-01-22 14:59:49 +00:00
David Baker
52bdd1b834 Overlay the push_rules_enabled map for users, otherwise they won't be able to disable server default rules. 2016-01-22 14:58:19 +00:00
David Baker
7a3fe48ba4 Merge pull request #519 from matrix-org/dbkr/treecache
Make LRU caching tree-based so subtrees of the cache can be invalidated cheaply.
2016-01-22 14:47:48 +00:00
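
A minimal sketch of a tree-based cache along the lines described above (illustrative only, not Synapse's TreeCache):

    # Keys are tuples stored as nested dicts, so everything under a common
    # prefix can be dropped in one operation.
    class TreeCache:
        def __init__(self):
            self.root = {}

        def set(self, key, value):
            node = self.root
            for part in key[:-1]:
                node = node.setdefault(part, {})
            node[key[-1]] = value

        def get(self, key, default=None):
            node = self.root
            for part in key:
                if not isinstance(node, dict) or part not in node:
                    return default
                node = node[part]
            return node

        def del_multi(self, key_prefix):
            # Invalidate the whole subtree under key_prefix in one operation.
            node = self.root
            for part in key_prefix[:-1]:
                node = node.get(part)
                if not isinstance(node, dict):
                    return
            node.pop(key_prefix[-1], None)

    cache = TreeCache()
    cache.set(("room1", "alice"), 1)
    cache.set(("room1", "bob"), 2)
    cache.del_multi(("room1",))  # drops every entry cached for room1
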
David Baker
7cd418d38e Don't add the member function if we're not using treecache 2016-01-22 13:40:37 +00:00
David Baker
cd80019eec docs 2016-01-22 12:21:13 +00:00
David Baker
d552861346 Revert all the bits changing keys of everything that used LRUCaches to tuples 2016-01-22 12:18:14 +00:00
David Baker
10f76dc5da Make LRU cache not default to treecache & add options to use it 2016-01-22 12:10:33 +00:00
David Baker
5b142788d2 Add __contains__ 2016-01-22 11:49:59 +00:00
David Baker
eaa836e8ca Docs for treecache 2016-01-22 11:47:22 +00:00
David Baker
42eae4634f Use new invalidate_many cache invalidation to invalidate the event_push_actions cache appropriately. 2016-01-22 11:22:48 +00:00
David Baker
8acc5cb60f Add invalidate_many here too 2016-01-22 11:22:32 +00:00
David Baker
31a051b677 Test treecache directly 2016-01-22 11:22:00 +00:00
Erik Johnston
8f9c74e9f1 Fix tests 2016-01-22 10:48:27 +00:00
Erik Johnston
975903ae17 Sanitize filters 2016-01-22 10:41:30 +00:00
David Baker
4efcaa43c8 Add tests for treecache directly and test del_multi at the LruCache level too. 2016-01-22 10:37:37 +00:00
David Baker
330be18ec5 peppate 2016-01-21 19:17:32 +00:00
David Baker
f1f8122120 Change LRUCache to be tree-based so we can delete subtrees. 2016-01-21 19:16:25 +00:00
Erik Johnston
297eded261 Merge pull request #517 from matrix-org/erikj/push_only_room
Only fetch events for rooms and receipts
2016-01-21 16:12:39 +00:00
Erik Johnston
0e07f2e15d Only fetch events for rooms and receipts 2016-01-21 16:10:37 +00:00
Erik Johnston
82b46f556d Merge pull request #516 from matrix-org/erikj/push_perf
Reduce number of calls to get_unread_event_push_actions_by_room
2016-01-21 15:12:56 +00:00
Erik Johnston
8f66fe6392 Cache get_unread_event_push_actions_by_room_for_user 2016-01-21 15:02:07 +00:00
Erik Johnston
3a00f13436 Only compute badge count when necessary.
This reverts commit d726597737.
2016-01-21 14:56:11 +00:00
Erik Johnston
c6549117a2 Fix AttributeError 2016-01-21 14:02:14 +00:00
Erik Johnston
ed1d189e10 Merge pull request #515 from matrix-org/erikj/syn-606
SYN-606: Peeking does not wake up /events
2016-01-21 13:40:20 +00:00
Erik Johnston
dfe1273d14 Merge pull request #509 from matrix-org/erikj/dns_cache
Cache dns lookups, and use the cache if we fail to lookup servers later
2016-01-21 13:37:23 +00:00
Erik Johnston
91a222c66d SYN-606: Peeking does not wake up /events
If a real user attempted to first peek into one room, and then another,
their room event stream would not be woken up for events in the later
room.
2016-01-21 13:22:26 +00:00
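
A toy illustration of the bug's shape (not the real notifier code):

    # The set of rooms a user's /events stream is woken for must grow with
    # each peek rather than being replaced by the most recently peeked room.
    rooms_listened_to = {}  # user_id -> set of room_ids

    def on_peek(user_id, room_id):
        rooms_listened_to.setdefault(user_id, set()).add(room_id)
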
David Baker
0503bdb316 Merge pull request #514 from matrix-org/remove_member_event_rule
Remove member event rule as per SYN-607
2016-01-21 12:00:26 +00:00
David Baker
930ba003f8 Remove member event rule as per SYN-607 2016-01-21 11:50:27 +00:00
Erik Johnston
d54005059c Merge branch 'develop' of github.com:matrix-org/synapse into develop 2016-01-21 10:19:15 +00:00
Erik Johnston
d7c85ad916 Add another graph contrib 2016-01-21 10:19:05 +00:00
David Baker
c1a3021771 Merge pull request #507 from matrix-org/push_badge_counts
Push badge counts
2016-01-21 10:09:11 +00:00
Erik Johnston
d049e81b10 Merge pull request #513 from matrix-org/erikj/register_user_chars
Don't explode when given a unicode username in /register/
2016-01-21 09:53:35 +00:00
Erik Johnston
c43b6dcc75 Fix change_password 2016-01-20 16:14:48 +00:00
David Baker
367cfab4e6 peppate 2016-01-20 16:05:09 +00:00
Erik Johnston
69adf8c384 Merge pull request #512 from matrix-org/erikj/whine_on_from
Whine if we give a from param to /sync
2016-01-20 15:58:44 +00:00
Erik Johnston
73ca8e5834 Whine if we give a from param to /sync 2016-01-20 15:42:57 +00:00
Erik Johnston
b088291f14 Don't explode when given a unicode username in /register/ 2016-01-20 15:40:25 +00:00
Daniel Wagner-Hall
a2ae01cc0f Merge pull request #510 from matrix-org/daniel/nonguestpeeking
Allow non-guests to peek on rooms using /events
2016-01-20 15:34:18 +00:00
Daniel Wagner-Hall
da417aa56d Allow non-guests to peek on rooms using /events 2016-01-20 15:34:07 +00:00
David Baker
d4315bbf6b Add index by user id on receipts_linearized 2016-01-20 15:33:27 +00:00
David Baker
3fa344c037 Add storage function to get all receipts for a user. Also add some cache invalidation to the receipts storage because there wasn't any, and remove a method that was unused. 2016-01-20 15:30:31 +00:00
David Baker
7cc047455e Inline membership specifier 2016-01-20 13:50:28 +00:00
David Baker
d726597737 Simplify badge updating code by just updating it every time we get woken up and it's not an event 2016-01-20 13:49:00 +00:00
David Baker
2309450a76 Merge branch 'develop' into push_badge_counts 2016-01-20 13:45:13 +00:00
David Baker
ea5eea2424 Merge branch 'dbkr/no_push_for_own_events' into develop 2016-01-20 13:44:46 +00:00
David Baker
746f6e0eb3 'filtered' is a list of zero or 1 2016-01-20 13:44:04 +00:00
David Baker
7441d8cc0c Merge remote-tracking branch 'origin/develop' into push_badge_counts 2016-01-20 13:40:22 +00:00
David Baker
ccf9387d57 Merge branch 'develop' into push_badge_counts 2016-01-20 13:33:45 +00:00
David Baker
d4cefb6289 Merge pull request #511 from matrix-org/dbkr/no_push_for_own_events
Don't generate push actions for our own events
2016-01-20 13:33:25 +00:00
David Baker
259d1ecd1d Don't generate push actions for our own events 2016-01-20 13:24:59 +00:00
Erik Johnston
191070123d Cache dns lookups, and use the cache if we fail to lookup servers later 2016-01-20 11:34:09 +00:00
David Baker
afb7b377f2 Merge branch 'develop' into push_badge_counts 2016-01-19 18:17:23 +00:00
Erik Johnston
af30140621 Merge pull request #506 from matrix-org/erikj/push_fast
Only compute unread notifications for rooms we send down stream
2016-01-19 17:31:10 +00:00
Erik Johnston
ac2842ff1e Only compute unread notifications for rooms we send down stream 2016-01-19 17:19:53 +00:00
Erik Johnston
892ee473d9 Don't use form of get_state_for_events with None state_key 2016-01-19 17:14:46 +00:00
Erik Johnston
40d9765123 Merge pull request #505 from matrix-org/erikj/push_fast
Push actions perf
2016-01-19 16:16:05 +00:00
Erik Johnston
2818a000aa Use split rather than endswith 2016-01-19 16:11:39 +00:00
Erik Johnston
fb5d8e58ff Change regex cache size to 5000 2016-01-19 16:07:07 +00:00
Erik Johnston
5a7d1ecffc Add regex cache. Only calculate push actions for users that have sent read receipts, and are on that server 2016-01-19 16:01:05 +00:00
Erik Johnston
d056a0a3d8 Handle glob -> regex errors 2016-01-19 14:43:24 +00:00
Erik Johnston
7a079adc8f Merge pull request #477 from matrix-org/erikj/access_token_log
Don't log urlencoded access_tokens
2016-01-19 14:28:29 +00:00
Erik Johnston
b8518ffe65 Use all_ephemeral_by_room in incremental_sync_with_gap_for_room 2016-01-19 14:26:58 +00:00
Erik Johnston
9654ee0848 Return, don't break 2016-01-19 14:24:59 +00:00
Erik Johnston
7ecd211163 Except truthy values 2016-01-19 14:22:02 +00:00
Erik Johnston
05f78b3b52 Merge pull request #504 from matrix-org/erikj/highlight_count
Return highlight_count in /sync
2016-01-19 13:21:48 +00:00
Erik Johnston
f5fc8f2928 Merge pull request #486 from matrix-org/default_notify
Change default pushrules back to notifying for all messages.
2016-01-19 12:49:29 +00:00
Erik Johnston
9a8949f022 Merge branch 'develop' of github.com:matrix-org/synapse into default_notify 2016-01-19 11:37:05 +00:00
Erik Johnston
3adcc4c86a Return highlight_count in /sync 2016-01-19 11:35:50 +00:00
Erik Johnston
47e7963e50 Merge pull request #502 from matrix-org/erikj/push_notif_perf
Unread notification performance.
2016-01-19 11:27:27 +00:00
Daniel Wagner-Hall
88af7bb48b Merge pull request #503 from matrix-org/daniel/nonghosts
Don't error on AS non-ghost user use
2016-01-19 11:27:00 +00:00
Erik Johnston
0d241e1114 Take a deepcopy of push rules before mutating them 2016-01-19 10:15:12 +00:00
Erik Johnston
f750a442f7 Update _id 2016-01-19 10:14:53 +00:00
Erik Johnston
003853e702 Preserve truthiness 2016-01-18 17:34:02 +00:00
Erik Johnston
a284ad4092 You need to escape backslashes 2016-01-18 17:20:44 +00:00
Erik Johnston
47f82e4408 Fix branch didn't check word_boundary 2016-01-18 17:04:36 +00:00
Erik Johnston
5cd2126a6a Remove dead code 2016-01-18 16:49:46 +00:00
Erik Johnston
29c353c553 Don't split at word boundaries, actually use regex 2016-01-18 16:48:17 +00:00
Daniel Wagner-Hall
808a8aedab Don't error on AS non-ghost user use
This will probably go away either when we fix our existing ASes, or when
we kill the concept of non-ghost users.
2016-01-18 16:33:05 +00:00
Daniel Wagner-Hall
74474a6d63 Pull out app service user lookup
I find this a lot simpler than nested try-catches and stuff
2016-01-18 16:32:33 +00:00
Erik Johnston
d16dcf642e Drop log levels 2016-01-18 15:44:04 +00:00
Erik Johnston
7dd14e5d1c Add comments and remove dead code 2016-01-18 15:42:23 +00:00
Erik Johnston
866fe27e78 Do for loop once at start 2016-01-18 15:29:41 +00:00
Erik Johnston
d1f56f732e Use static for const dicts 2016-01-18 15:17:56 +00:00
Erik Johnston
0e39dcd135 Remove internal ids 2016-01-18 14:50:17 +00:00
Erik Johnston
345ff2196a Don't edit ruleset 2016-01-18 14:50:17 +00:00
Erik Johnston
2c176e02ae Make unit tests work 2016-01-18 14:48:50 +00:00
Erik Johnston
63485b3029 Re-enable unread notifications 2016-01-18 14:48:30 +00:00
Erik Johnston
f59b564507 Make notifications go quicker 2016-01-18 14:48:29 +00:00
Erik Johnston
2068678b8c Make Event objects behave more like dicts 2016-01-18 14:43:50 +00:00
Erik Johnston
cc66a9a5e3 Allow filtering events for multiple users at once 2016-01-18 14:43:50 +00:00
Daniel Wagner-Hall
5de1563997 Merge pull request #501 from matrix-org/daniel/unban
Require unbanning before other membership changes
2016-01-15 16:27:29 +00:00
Daniel Wagner-Hall
ac5a4477ad Require unbanning before other membership changes 2016-01-15 16:27:26 +00:00
Daniel Wagner-Hall
c049f60d4a Merge pull request #500 from matrix-org/daniel/cleanup
Remove unused parameters
2016-01-15 16:02:22 +00:00
Daniel Wagner-Hall
b5ce4f0427 Remove unused parameters 2016-01-15 13:54:03 +00:00
David Baker
ac12b6d332 Merge branch 'invalid_user_name' into develop 2016-01-15 10:07:01 +00:00
David Baker
5819b7a78c M_INVALID_USERNAME to be consistent with the parameter name 2016-01-15 10:06:34 +00:00
David Baker
5bf1a3d6dc Merge pull request #499 from matrix-org/invalid_user_name
Add specific error code for invalid user names.
2016-01-15 09:18:02 +00:00
David Baker
3f8db3d597 Add specific error code for invalid user names. 2016-01-14 17:21:04 +00:00
Erik Johnston
a50013fd99 Merge pull request #497 from matrix-org/erikj/max_limit
Clamp pagination limits to at most 1000
2016-01-14 16:28:52 +00:00
Richard van der Hoff
2978053d16 Merge branch 'release-v0.12.1' into develop 2016-01-14 15:04:08 +00:00
Daniel Wagner-Hall
2c760372d6 Merge pull request #496 from matrix-org/daniel/3
Require ID and as_token be unique for ASs

Defaults ID to as_token if not specified. This will change
when IDs are fully supported.
2016-01-14 14:34:11 +00:00
Daniel Wagner-Hall
2680043bc6 Require ID and as_token be unique for ASs
Defaults ID to as_token if not specified. This will change
when IDs are fully supported.
2016-01-14 14:34:01 +00:00
David Baker
8db451f652 Merge pull request #498 from matrix-org/push_rule_enabled_fix
Fix enabling & disabling push rules
2016-01-14 13:34:05 +00:00
Richard van der Hoff
430d3d74f6 Merge branch 'release-v0.12.1' into develop 2016-01-14 11:17:47 +00:00
Daniel Wagner-Hall
d14fcfd24a Merge pull request #487 from matrix-org/daniel/forceregistration
Require AS users to be registered before use
2016-01-14 11:06:43 +00:00
Daniel Wagner-Hall
27927463a1 Merge pull request #494 from matrix-org/daniel/2
Don't start server if ASes are invalidly configured
2016-01-14 11:06:19 +00:00
Daniel Wagner-Hall
fcb6df45e5 Merge pull request #493 from matrix-org/daniel/1
Delete unused code
2016-01-14 11:06:14 +00:00
David Baker
a7927c13fd Fix enabling & disabling push rules 2016-01-14 10:53:44 +00:00
Erik Johnston
339c8f0133 Clamp pagination limits to at most 1000 2016-01-14 10:22:02 +00:00
Erik Johnston
bce602eb4e Use logger not logging 2016-01-14 09:56:26 +00:00
Erik Johnston
939cbd7057 Merge pull request #495 from matrix-org/erikj/disable_notification
Temporarily disable notification branch
2016-01-14 09:46:09 +00:00
David Baker
12623c99b6 Use the unread notification count to send accurate badge counts in push notifications. 2016-01-13 18:55:57 +00:00
Erik Johnston
2655d61d70 Don't change signature. Return empty list 2016-01-13 17:43:39 +00:00
Erik Johnston
fcb05b4c82 Temporarily disable notification branch 2016-01-13 17:39:58 +00:00
Mark Haines
806bae1ee7 Merge pull request #488 from matrix-org/markjh/user_name
Rename 'user_name' to 'user_id' in push
2016-01-13 18:11:01 +01:00
Daniel Wagner-Hall
f6fcff3602 Don't start server if ASes are invalidly configured 2016-01-13 17:09:24 +00:00
Daniel Wagner-Hall
244b356a37 Delete unused code 2016-01-13 17:03:58 +00:00
Mark Haines
9c1f853d58 Rename 'user_name' to 'user_id' in push to make it consistent with the rest of the code 2016-01-13 13:32:59 +00:00
Daniel Wagner-Hall
7d09ab8915 Require AS users to be registered before use 2016-01-13 13:19:47 +00:00
David Baker
d9db819e23 Change default pushrules back to notifying for all messages. 2016-01-13 13:15:53 +00:00
Mark Haines
37716d55ed Merge pull request #482 from matrix-org/markjh/table_name
Finish removing the .*Table objects from the storage layer.
2016-01-13 13:45:39 +01:00
Erik Johnston
4399684582 Merge pull request #483 from matrix-org/erikj/bulk_get_push_rules
bulk_get_push_rules should handle empty lists
2016-01-13 12:35:11 +00:00
Erik Johnston
44b4fc5f50 Use compiled regex 2016-01-13 11:47:32 +00:00
Mark Haines
f4dad9f639 Merge remote-tracking branch 'origin/erikj/bulk_get_push_rules' into markjh/table_name
Conflicts:
	synapse/storage/push_rule.py
2016-01-13 11:46:07 +00:00
Erik Johnston
8740e4e94a bulk_get_push_rules should handle empty lists 2016-01-13 11:37:17 +00:00
Mark Haines
c0a279e808 Delete the table objects from TransactionStore 2016-01-13 11:15:20 +00:00
Mark Haines
96e400fee5 Remove the RoomsTable object 2016-01-13 11:07:32 +00:00
Erik Johnston
72ba26679b Merge pull request #480 from matrix-org/erikj/guest_event_tightloop
Don't fire user_joined_room when guest hits /events
2016-01-13 11:00:50 +00:00
Erik Johnston
ea47760bd8 Merge pull request #481 from matrix-org/erikj/SYN-589
Don't include old left rooms in /sync
2016-01-12 16:55:55 +00:00
Erik Johnston
70dfe4dc96 Don't include old left rooms 2016-01-12 15:01:56 +00:00
Mark Haines
a8e9e0b916 Remove the PushersTable and EventPushActionsTable objects 2016-01-12 14:41:26 +00:00
Mark Haines
31de2953a3 Remove the PushRuleTable and PushRuleEnableTable objects 2016-01-12 14:36:16 +00:00
Erik Johnston
fd5c28dc52 Don't fire user_joined_room when guest hits /events
Firing the 'user_joined_room' signal every time a guest hits /events
causes all presence for that room to be returned in the stream. This may
sound helpful, but causes clients to tightloop calling /events.

In general, guest users should get the initial presence from (room)
initial sync and so we don't require presence to subsequently come down
the event stream.
2016-01-12 11:04:06 +00:00
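
An illustrative guard for the behaviour described above (hypothetical names, not the real handler):

    # Only fire the presence signal on a genuine join, not every time a
    # guest's /events request touches the room; otherwise clients end up
    # tight-looping on /events.
    def on_room_access(distributor, user, room_id, newly_joined):
        if not newly_joined:
            return  # guest /events polling lands here; skip the signal
        distributor.fire("user_joined_room", user=user, room_id=room_id)
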
Daniel Wagner-Hall
42aa1f3f33 Merge pull request #478 from matrix-org/daniel/userobject
Introduce a User object

I'm sick of passing around more and more things as tuple items around
the whole world, and needing to edit every call site every time there is
more information about a user. So pass them around together as an
object.

This object has incredibly poorly named fields because we have a
convention that `user` indicates a UserID object, and `user_id`
indicates a string. I tried to clean up the whole repo to fix this, but
gave up. So instead, I introduce a second convention. A user_object is a
User, and a user_id_object is a UserId. I may have cried a little bit.
2016-01-11 17:50:22 +00:00
Daniel Wagner-Hall
2110e35fd6 Introduce a Requester object
This tracks data about the entity which made the request. This is
instead of passing around a tuple, which requires call-site
modifications every time a new piece of optional context is passed
around.

I tried to introduce a User object. I gave up.
2016-01-11 17:48:45 +00:00
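
A shape along these lines, with field names guessed from the description rather than taken from the actual code:

    # Field names are assumptions, not necessarily the exact ones in Synapse.
    from collections import namedtuple

    Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])

    requester = Requester(user="@alice:example.com", access_token_id=1234, is_guest=False)
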
David Baker
b5d33a656f Postgres doesn't like booleans 2016-01-11 17:13:52 +00:00
David Baker
fe56138142 Remove rogue 'admin' 2016-01-11 17:09:03 +00:00
Erik Johnston
8f8b884430 Don't log urlencoded access_tokens 2016-01-08 17:48:08 +00:00
Erik Johnston
29e595e5d4 Merge pull request #474 from matrix-org/erikj/core_dump
Turn on core dumps
2016-01-08 16:52:15 +00:00
David Baker
c232780081 Merge pull request #456 from matrix-org/store_event_actions
Send unread notification counts
2016-01-08 14:47:15 +00:00
Mark Haines
7c816de442 Merge pull request #475 from matrix-org/markjh/fix_thumbnail
Only use cropped thumbnails when asked for a cropped thumbnail.
2016-01-07 20:07:53 +01:00
Mark Haines
8677b7d698 Only use cropped thumbnails when asked for a cropped thumbnail.
Even though ones cropped with scale might be technically valid.
2016-01-07 18:57:15 +00:00
Erik Johnston
33bef689c1 Turn on core dumps 2016-01-07 15:34:30 +00:00
Erik Johnston
fcbe63eaad Use logger not logging 2016-01-07 15:28:17 +00:00
Erik Johnston
5727922106 Merge pull request #473 from matrix-org/erikj/ssh_manhole
Change manhole to use ssh
2016-01-07 14:36:16 +00:00
Erik Johnston
5dc5e29b9c s/telnet/ssh/ 2016-01-07 14:02:57 +00:00
Erik Johnston
3deffcdb1e Merge pull request #465 from matrix-org/syn-453
Update readme to reflect change in signing key name
2016-01-07 14:01:03 +00:00
Erik Johnston
c9ae1d1ee5 Change manhole to use ssh 2016-01-07 13:59:02 +00:00
David Baker
daadcf36c0 This comma is actually important 2016-01-07 10:15:35 +00:00
David Baker
823b679232 more commas 2016-01-07 10:02:47 +00:00
Matthew Hodgson
6c28ac260c copyrights 2016-01-07 04:26:29 +00:00
Matthew Hodgson
49c34dfd36 Merge pull request #472 from roblabla/patch-1
Config Comment mixup in captcha public/private key
2016-01-06 22:24:28 +00:00
Robin Lambertz
4106477e7f Config Comment mixup in captcha public/private key 2016-01-06 23:19:33 +01:00
Daniel Wagner-Hall
7ac6ca7311 Merge pull request #468 from matrix-org/daniel/versionsyo
Add /_matrix/versions to report supported versions
2016-01-06 18:09:04 +00:00
Daniel Wagner-Hall
11a974da21 Add /_matrix/versions to report supported versions 2016-01-06 18:08:52 +00:00
David Baker
09dc9854cd comma style 2016-01-06 17:44:10 +00:00
David Baker
442fcc02f7 Merge remote-tracking branch 'origin/develop' into store_event_actions 2016-01-06 17:28:55 +00:00
David Baker
b6a585348a Adding is_guest here won't work because it just constructs a dict of uid -> password hash 2016-01-06 17:16:02 +00:00
Mark Haines
c582f178b7 Merge pull request #469 from matrix-org/markjh/joined_guest_access
Guest users must be joined to a room to see it in /sync
2016-01-06 18:10:11 +01:00
Mark Haines
4cec90a260 Pass whether the user was a guest to some of the event streams 2016-01-06 16:54:57 +00:00
David Baker
0e48f7f245 fix tests 2016-01-06 16:46:41 +00:00
Mark Haines
392773ccb2 Guest users must be joined to a room to see it in /sync 2016-01-06 16:44:13 +00:00
Daniel Wagner-Hall
bf32922e5a Log when starting stats reporting 2016-01-06 14:13:34 +00:00
Daniel Wagner-Hall
797691f908 Log on stats scheduling 2016-01-06 14:04:27 +00:00
Daniel Wagner-Hall
e5ea4fad78 Merge pull request #466 from matrix-org/daniel/logloglog
Log when we skip daily messages
2016-01-06 13:52:19 +00:00
Daniel Wagner-Hall
5880de186b Log when we skip daily messages 2016-01-06 13:52:16 +00:00
David Baker
992928304f Delete notifications for redacted events 2016-01-06 11:58:46 +00:00
David Baker
ae1262a241 Add schema change file for is_guest flag 2016-01-06 11:58:20 +00:00
David Baker
c79f221192 Add is_guest flag to users db to track whether a user is a guest user or not. Use this so we can run _filter_events_for_client when calculating event_push_actions. 2016-01-06 11:38:09 +00:00
Erik Johnston
8ce5679813 Update readme to reflect change in signing key name 2016-01-06 11:19:51 +00:00
David Baker
eb03625626 Merge remote-tracking branch 'origin/develop' into store_event_actions 2016-01-05 18:39:50 +00:00
Daniel Wagner-Hall
87d577e023 Merge pull request #463 from matrix-org/daniel/hashtagnofilter
Skip, rather than erroring, invalid guest requests

Erroring causes problems when people make illegal requests, because they
don't know what limit parameter they should pass.

This is definitely buggy. It leaks message counts for rooms people don't
have permission to see, via tokens. But apparently we already
consciously decided to allow that as a team, so this preserves that
behaviour.
2016-01-05 18:12:40 +00:00
Daniel Wagner-Hall
2ef6de928d Skip, rather than erroring, invalid guest requests
Erroring causes problems when people make illegal requests, because they
don't know what limit parameter they should pass.

This is definitely buggy. It leaks message counts for rooms people don't
have permission to see, via tokens. But apparently we already
consciously decided to allow that as a team, so this preserves that
behaviour.
2016-01-05 18:12:37 +00:00
Daniel Wagner-Hall
29e131df43 Merge pull request #462 from matrix-org/daniel/guestupgrade
Allow guests to upgrade their accounts
2016-01-05 18:01:29 +00:00
Daniel Wagner-Hall
cfd07aafff Allow guests to upgrade their accounts 2016-01-05 18:01:18 +00:00
Erik Johnston
a178eb1bc8 Merge pull request #464 from matrix-org/erikj/crop_correct
Use larger thumbnails rather than smaller when method=crop
2016-01-05 17:41:07 +00:00
Erik Johnston
8737ead008 Use larger thumbnail rather than smaller. 2016-01-05 17:30:30 +00:00
Erik Johnston
90921981be Merge pull request #461 from matrix-org/erikj/sync_leave
Return /sync when something under the 'leave' key has changed
2016-01-05 14:59:05 +00:00
Erik Johnston
acb19068d0 Return /sync when something under the 'leave' key has changed 2016-01-05 14:49:06 +00:00
Erik Johnston
d74c4e90d4 Merge pull request #460 from matrix-org/erikj/create_room_3pid_invite
Support inviting 3pids in /createRoom
2016-01-05 14:06:23 +00:00
David Baker
85ca8cb90c comment typo 2016-01-05 13:32:39 +00:00
Erik Johnston
c3ea36304b Use named args 2016-01-05 12:57:45 +00:00
Erik Johnston
1b5642604b Support inviting 3pids in /createRoom 2016-01-05 11:56:21 +00:00
Erik Johnston
07c33eff43 Merge branch 'master' of github.com:matrix-org/synapse into develop 2016-01-05 10:25:13 +00:00
David Baker
4eb7b950c8 = not == in sql 2016-01-04 18:11:17 +00:00
David Baker
c77e7e60fc Only joined rooms have unread_notif_count 2016-01-04 15:49:06 +00:00
David Baker
d74c6ace24 comma 2016-01-04 15:32:00 +00:00
David Baker
f1b67730fa Add unread_notif_count in incremental_sync_with_gap 2016-01-04 14:50:36 +00:00
David Baker
92a1e74b20 fix tests 2016-01-04 14:17:35 +00:00
David Baker
c914d67cda Rename event-actions to event_push_actions as per PR request 2016-01-04 14:05:37 +00:00
David Baker
928c575c6f Merge remote-tracking branch 'origin/develop' into store_event_actions 2016-01-04 13:39:51 +00:00
David Baker
3051c9d002 Address minor PR issues 2016-01-04 13:39:29 +00:00
David Baker
d2a92c6bde Fix merge fail with anon access stuff 2015-12-22 18:25:04 +00:00
David Baker
d79e90f078 Add mocks to make tests work again 2015-12-22 17:56:56 +00:00
David Baker
9b4cd0cd0f pep8 & unused variable 2015-12-22 17:25:09 +00:00
David Baker
140a50f641 Merge remote-tracking branch 'origin/develop' into store_event_actions 2015-12-22 17:23:35 +00:00
David Baker
5645d9747b Add some comments to areas that could be optimised. 2015-12-22 17:19:22 +00:00
David Baker
3fbb031745 Remove the list of problems (moved to jira issues) 2015-12-22 17:13:47 +00:00
David Baker
4c8f6a7e42 Insert push actions in a single db query rather than one per user/profile_tag 2015-12-22 17:04:31 +00:00
David Baker
77f06856b6 clarify problems 2015-12-22 15:22:26 +00:00
David Baker
65c451cb38 Add bulk push rule evaluator which actually still evaluates rules one by one, but does far fewer db queries to fetch the rules 2015-12-22 15:19:34 +00:00
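
An illustrative outline of the bulk evaluator described above (hypothetical helper functions):

    # One batched query to fetch every user's rules, then per-user,
    # rule-by-rule evaluation entirely in memory.
    def bulk_evaluate(event, user_ids, fetch_rules_for_users, rule_matches):
        rules_by_user = fetch_rules_for_users(user_ids)  # single DB round-trip
        actions_by_user = {}
        for user_id, rules in rules_by_user.items():
            for rule in rules:
                if rule_matches(rule, event, user_id):
                    actions_by_user[user_id] = rule["actions"]
                    break  # first matching rule wins
        return actions_by_user
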
David Baker
c061b47c57 Merge remote-tracking branch 'origin/develop' into store_event_actions 2015-12-21 15:30:26 +00:00
David Baker
f73f154ec2 Only run pushers for users on this hs! 2015-12-21 15:28:54 +00:00
David Baker
091c545c4f pep8 2015-12-21 10:14:57 +00:00
David Baker
b131fb1fe2 add list of things I want to fix with this branch 2015-12-18 18:04:45 +00:00
David Baker
413d0d6a24 Make unread notification count sending work: put the correct count in incremental syncs too, where necessary, and fix silly bugs like only selecting the event actions for that user... 2015-12-18 17:47:00 +00:00
David Baker
42ad49f5b7 still very WIP, but now sends unread_notifications_count in the room object on sync (only actually correct in a full sync: hardcoded to 0 in incremental syncs). 2015-12-16 18:42:09 +00:00
David Baker
5e909c73d7 Store nothing instead of ['dont_notify'] for events with no notification required: much as it would be nice to be able to tell between the event not having been processed and there being no notification for it, this isn't worth filling up the table with ['dont_notify'] I think. Consequently treat the empty actions array as dont_notify and filter dont_notify out of the result. 2015-12-10 18:40:28 +00:00
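
A sketch of the storage rule described above (illustrative only):

    # Drop 'dont_notify' before storing, and store nothing at all if that
    # leaves no actions, treating absence as "don't notify" when read back.
    def actions_to_store(actions):
        actions = [a for a in actions if a != "dont_notify"]
        return actions or None
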
David Baker
aa667ee396 Save event actions to the db 2015-12-10 17:51:15 +00:00
David Baker
a84a693327 Having consulted The Erikle, this should go at the end of on_receive_pdu, otherwise it will be triggered whenever we backfill too. 2015-12-10 17:18:46 +00:00
David Baker
21f135ba76 Very first cut of calculating actions for events as they come in. Doesn't store them yet. Not very efficient. 2015-12-10 16:26:08 +00:00
339 changed files with 5757 additions and 3985 deletions


@@ -51,3 +51,6 @@ Steven Hammerton <steven.hammerton at openmarket.com>
Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items


@@ -1,3 +1,85 @@
Changes in synapse v0.13.2 (2016-02-11)
=======================================

* Fix bug where ``/events`` would fail to skip some events if there had been
  more events than the limit specified since the last request (PR #570)

Changes in synapse v0.13.1 (2016-02-10)
=======================================

* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
  pull in the fix for SYWEB-361 so that the default client can display
  HTML messages again(!)

Changes in synapse v0.13.0 (2016-02-10)
=======================================

This version includes an upgrade of the schema, specifically adding an index to
the ``events`` table. This may cause synapse to pause for several minutes the
first time it is started after the upgrade.

Changes:

* Improve general performance (PR #540, #543, #544, #54, #549, #567)
* Change guest user ids to be incrementing integers (PR #550)
* Improve performance of public room list API (PR #552)
* Change profile API to omit keys rather than return null (PR #557)
* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/``
  (PR #559)

Bug fixes:

* Fix bug with upgrading guest accounts where it would fail if you opened the
  registration email on a different device (PR #547)
* Fix bug where unread count could be wrong (PR #568)

Changes in synapse v0.12.1-rc1 (2016-01-29)
===========================================

Features:

* Add unread notification counts in ``/sync`` (PR #456)
* Add support for inviting 3pids in ``/createRoom`` (PR #460)
* Add ability for guest accounts to upgrade (PR #462)
* Add ``/versions`` API (PR #468)
* Add ``event`` to ``/context`` API (PR #492)
* Add specific error code for invalid user names in ``/register`` (PR #499)
* Add support for push badge counts (PR #507)
* Add support for non-guest users to peek in rooms using ``/events`` (PR #510)

Changes:

* Change ``/sync`` so that guest users only get rooms they've joined (PR #469)
* Change to require unbanning before other membership changes (PR #501)
* Change default push rules to notify for all messages (PR #486)
* Change default push rules to not notify on membership changes (PR #514)
* Change default push rules in one to one rooms to only notify for events that
  are messages (PR #529)
* Change ``/sync`` to reject requests with a ``from`` query param (PR #512)
* Change server manhole to use SSH rather than telnet (PR #473)
* Change server to require AS users to be registered before use (PR #487)
* Change server not to start when ASes are invalidly configured (PR #494)
* Change server to require ID and ``as_token`` to be unique for AS's (PR #496)
* Change maximum pagination limit to 1000 (PR #497)

Bug fixes:

* Fix bug where ``/sync`` didn't return when something under the leave key
  changed (PR #461)
* Fix bug where we returned smaller rather than larger than requested
  thumbnails when ``method=crop`` (PR #464)
* Fix thumbnails API to only return cropped thumbnails when asking for a
  cropped thumbnail (PR #475)
* Fix bug where we occasionally still logged access tokens (PR #477)
* Fix bug where ``/events`` would always return immediately for guest users
  (PR #480)
* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481)
* Fix enabling and disabling push rules (PR #498)
* Fix bug where ``/register`` returned 500 when given unicode username
  (PR #513)

Changes in synapse v0.12.0 (2016-01-04)
=======================================


@@ -125,6 +125,15 @@ Installing prerequisites on Mac OS X::
sudo easy_install pip
sudo pip install virtualenv
Installing prerequisites on Raspbian::
sudo apt-get install build-essential python2.7-dev libffi-dev \
python-pip python-setuptools sqlite3 \
libssl-dev python-virtualenv libjpeg-dev
sudo pip install --upgrade pip
sudo pip install --upgrade ndg-httpsclient
sudo pip install --upgrade virtualenv
To install the synapse homeserver run::
virtualenv -p python2.7 ~/.synapse
@@ -167,8 +176,7 @@ identify itself to other Home Servers, so don't lose or delete them. It would be
wise to back them up somewhere safe. If, for whatever reason, you do need to
change your Home Server's keys, you may find that other Home Servers have the
old key cached. If you update the signing key, you should change the name of the
key in the <server name>.signing.key file (the second word, which by default is
, 'auto') to something different.
key in the <server name>.signing.key file (the second word) to something different.
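For illustration only (not part of this changeset): a ``<server name>.signing.key`` file holds one key per line in the form ``<algorithm> <key id> <base64 key>``, so the rename described above means editing the second word, for example from::

    ed25519 auto 4nIVSrNHYV0pPDIRfRdk7XxBvhLzvNnsCvkIJkjLM1A

to something like::

    ed25519 old_key_2016_02 4nIVSrNHYV0pPDIRfRdk7XxBvhLzvNnsCvkIJkjLM1A

(the base64 value here is a made-up placeholder).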
By default, registration of new users is disabled. You can either enable
registration in the config by specifying ``enable_registration: true``
@@ -311,6 +319,18 @@ may need to manually upgrade it::
sudo pip install --upgrade pip
Installing may fail with ``Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)``.
You can fix this by manually upgrading pip and virtualenv::
sudo pip install --upgrade virtualenv
You can next rerun ``virtualenv -p python2.7 synapse`` to update the virtual env.
Installing may fail during installing virtualenv with ``InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.``
You can fix this by manually installing ndg-httpsclient::
pip install --upgrade ndg-httpsclient
Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``.
You can fix this by upgrading setuptools::
@@ -545,4 +565,4 @@ sphinxcontrib-napoleon::
Building internal API documentation::
python setup.py build_sphinx

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

contrib/graph/graph3.py Normal file
View File

@@ -0,0 +1,151 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydot
import cgi
import simplejson as json
import datetime
import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
def make_graph(file_name, room_id, file_prefix, limit):
print "Reading lines"
with open(file_name) as f:
lines = f.readlines()
print "Read lines"
events = [FrozenEvent(json.loads(line)) for line in lines]
print "Loaded events."
events.sort(key=lambda e: e.depth)
print "Sorted events"
if limit:
events = events[-int(limit):]
node_map = {}
graph = pydot.Dot(graph_name="Test")
for event in events:
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime('%Y-%m-%d %H:%M:%S,%f')
content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
content = content.replace("\n", "<br/>\n")
print content
content = []
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, basestring):
pass
else:
value = json.dumps(value)
content.append(
"<b>%s</b>: %s," % (
cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
)
)
content = "<br/>\n".join(content)
print content
label = (
"<"
"<b>%(name)s </b><br/>"
"Type: <b>%(type)s </b><br/>"
"State key: <b>%(state_key)s </b><br/>"
"Content: <b>%(content)s </b><br/>"
"Time: <b>%(time)s </b><br/>"
"Depth: <b>%(depth)s </b><br/>"
">"
) % {
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": content,
"time": t,
"depth": event.depth,
}
node = pydot.Node(
name=event.event_id,
label=label,
)
node_map[event.event_id] = node
graph.add_node(node)
print "Created Nodes"
for event in events:
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except:
end_node = pydot.Node(
name=prev_id,
label="<<b>%s</b>>" % (prev_id,),
)
node_map[prev_id] = end_node
graph.add_node(end_node)
edge = pydot.Edge(node_map[event.event_id], end_node)
graph.add_edge(edge)
print "Created edges"
graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
print "Created Dot"
graph.write_svg("%s.svg" % file_prefix, prog='dot')
print "Created svg"
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by reading "
"from a file with line deliminated events. \n"
"Requires pydot."
)
parser.add_argument(
"-p", "--prefix", dest="prefix",
help="String to prefix output files with",
default="graph_output"
)
parser.add_argument(
"-l", "--limit",
help="Only retrieve the last N events.",
)
parser.add_argument('event_file')
parser.add_argument('room')
args = parser.parse_args()
make_graph(args.event_file, args.room, args.prefix, args.limit)

View File

@@ -26,7 +26,7 @@ TOX_BIN=$WORKSPACE/.tox/py27/bin
if [[ ! -e .sytest-base ]]; then
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
(cd .sytest-base; git fetch)
(cd .sytest-base; git fetch -p)
fi
rm -rf sytest
@@ -52,7 +52,7 @@ RUN_POSTGRES=""
for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do
if psql synapse_jenkins_$port <<< ""; then
RUN_POSTGRES=$RUN_POSTGRES:$port
RUN_POSTGRES="$RUN_POSTGRES:$port"
cat > localhost-$port/database.yaml << EOF
name: psycopg2
args:
@@ -62,7 +62,7 @@ EOF
done
# Run if both postgresql databases exist
if test $RUN_POSTGRES = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
if test "$RUN_POSTGRES" = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
echo >&2 "Running sytest with PostgreSQL";
$TOX_BIN/pip install psycopg2
./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \

View File

@@ -1,5 +1,5 @@
#!/usr/bin/perl -pi
# Copyright 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
# limitations under the License.
$copyright = <<EOT;
/* Copyright 2015 OpenMarket Ltd
/* Copyright 2016 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
#!/usr/bin/perl -pi
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
# limitations under the License.
$copyright = <<EOT;
# Copyright 2015 OpenMarket Ltd
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

scripts-dev/dump_macaroon.py Executable file
View File

@@ -0,0 +1,24 @@
#!/usr/bin/env python2
import pymacaroons
import sys
if len(sys.argv) == 1:
sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
sys.exit(1)
macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None
macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print macaroon.inspect()
print ""
verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
verifier.verify(macaroon, key)
print "Signature is correct"
except Exception as e:
print e.message
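For context, a small companion sketch (not part of this changeset) showing how a macaroon that the dumper above can inspect is built with the same ``pymacaroons`` library; the location, identifier, key and caveat values are all made-up placeholders::

    import pymacaroons

    # Placeholder values throughout; nothing here is a real synapse secret.
    m = pymacaroons.Macaroon(
        location="example.com",
        identifier="key_id_1",
        key="a-secret-key",
    )
    m.add_first_party_caveat("gen = 1")

    print(m.serialize())   # pass this string to dump_macaroon.py as argv[1]
    print(m.inspect())     # the same inspection output the script prints

    v = pymacaroons.Verifier()
    v.satisfy_exact("gen = 1")
    print(v.verify(m, "a-secret-key"))  # True when the key matches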

View File

@@ -0,0 +1,62 @@
#! /usr/bin/python
import ast
import argparse
import os
import sys
import yaml
PATTERNS_V1 = []
PATTERNS_V2 = []
RESULT = {
"v1": PATTERNS_V1,
"v2": PATTERNS_V2,
}
class CallVisitor(ast.NodeVisitor):
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
name = node.func.id
else:
return
if name == "client_path_patterns":
PATTERNS_V1.append(node.args[0].s)
elif name == "client_v2_patterns":
PATTERNS_V2.append(node.args[0].s)
def find_patterns_in_code(input_code):
input_ast = ast.parse(input_code)
visitor = CallVisitor()
visitor.visit(input_ast)
def find_patterns_in_file(filepath):
with open(filepath) as f:
find_patterns_in_code(f.read())
parser = argparse.ArgumentParser(description='Find url patterns.')
parser.add_argument(
"directories", nargs='+', metavar="DIR",
help="Directories to search for definitions"
)
args = parser.parse_args()
for directory in args.directories:
for root, dirs, files in os.walk(directory):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(root, filename)
find_patterns_in_file(filepath)
PATTERNS_V1.sort()
PATTERNS_V2.sort()
yaml.dump(RESULT, sys.stdout, default_flow_style=False)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -16,3 +16,4 @@ ignore =
[flake8]
max-line-length = 90
ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.12.0"
__version__ = "0.13.2"

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,9 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
from synapse.types import RoomID, UserID, EventID
from synapse.types import Requester, RoomID, UserID, EventID
from synapse.util.logutils import log_function
from synapse.util.logcontext import preserve_context_over_fn
from unpaddedbase64 import decode_base64
import logging
@@ -510,35 +511,14 @@ class Auth(object):
"""
# Can optionally look elsewhere in the request (e.g. headers)
try:
access_token = request.args["access_token"][0]
# Check for application service tokens with a user_id override
try:
app_service = yield self.store.get_app_service_by_token(
access_token
)
if not app_service:
raise KeyError
user_id = app_service.sender
if "user_id" in request.args:
user_id = request.args["user_id"][0]
if not app_service.is_interested_in_user(user_id):
raise AuthError(
403,
"Application service cannot masquerade as this user."
)
if not user_id:
raise KeyError
user_id = yield self._get_appservice_user_id(request.args)
if user_id:
request.authenticated_entity = user_id
defer.returnValue(
Requester(UserID.from_string(user_id), "", False)
)
defer.returnValue((UserID.from_string(user_id), "", False))
return
except KeyError:
pass # normal users won't have the user_id query parameter set.
access_token = request.args["access_token"][0]
user_info = yield self._get_user_by_access_token(access_token)
user = user_info["user"]
token_id = user_info["token_id"]
@@ -550,7 +530,8 @@ class Auth(object):
default=[""]
)[0]
if user and access_token and ip_addr:
self.store.insert_client_ip(
preserve_context_over_fn(
self.store.insert_client_ip,
user=user,
access_token=access_token,
ip=ip_addr,
@@ -564,13 +545,40 @@ class Auth(object):
request.authenticated_entity = user.to_string()
defer.returnValue((user, token_id, is_guest,))
defer.returnValue(Requester(user, token_id, is_guest))
except KeyError:
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
errcode=Codes.MISSING_TOKEN
)
@defer.inlineCallbacks
def _get_appservice_user_id(self, request_args):
app_service = yield self.store.get_app_service_by_token(
request_args["access_token"][0]
)
if app_service is None:
defer.returnValue(None)
if "user_id" not in request_args:
defer.returnValue(app_service.sender)
user_id = request_args["user_id"][0]
if app_service.sender == user_id:
defer.returnValue(app_service.sender)
if not app_service.is_interested_in_user(user_id):
raise AuthError(
403,
"Application service cannot masquerade as this user."
)
if not (yield self.store.get_user_by_id(user_id)):
raise AuthError(
403,
"Application service has not registered this user"
)
defer.returnValue(user_id)
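For illustration (not part of the diff), the query arguments an application service sends when masquerading, in the ``request.args`` shape the helper above receives; the token and user id are made up::

    # Twisted exposes query parameters as lists of values.
    request_args = {
        "access_token": ["as_token_placeholder"],     # the AS's own token
        "user_id": ["@bridged_alice:example.com"],    # the user to act as
    }
    # _get_appservice_user_id returns the masqueraded user id when the AS is
    # interested in that user and the user is registered; without "user_id"
    # it falls back to the AS's sender.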
@defer.inlineCallbacks
def _get_user_by_access_token(self, token):
""" Get a registered user's ID.
@@ -583,7 +591,7 @@ class Auth(object):
AuthError if no user by that token exists or the token is invalid.
"""
try:
ret = yield self._get_user_from_macaroon(token)
ret = yield self.get_user_from_macaroon(token)
except AuthError:
# TODO(daniel): Remove this fallback when all existing access tokens
# have been re-issued as macaroons.
@@ -591,7 +599,7 @@ class Auth(object):
defer.returnValue(ret)
@defer.inlineCallbacks
def _get_user_from_macaroon(self, macaroon_str):
def get_user_from_macaroon(self, macaroon_str):
try:
macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
self.validate_macaroon(macaroon, "access", False)
@@ -690,6 +698,7 @@ class Auth(object):
def _look_up_user_by_access_token(self, token):
ret = yield self.store.get_user_by_access_token(token)
if not ret:
logger.warn("Unrecognised access token - not in store: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
@@ -707,6 +716,7 @@ class Auth(object):
token = request.args["access_token"][0]
service = yield self.store.get_app_service_by_token(token)
if not service:
logger.warn("Unrecognised appservice access token: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,6 +29,7 @@ class Codes(object):
USER_IN_USE = "M_USER_IN_USE"
ROOM_IN_USE = "M_ROOM_IN_USE"
BAD_PAGINATION = "M_BAD_PAGINATION"
BAD_STATE = "M_BAD_STATE"
UNKNOWN = "M_UNKNOWN"
NOT_FOUND = "M_NOT_FOUND"
MISSING_TOKEN = "M_MISSING_TOKEN"
@@ -42,6 +43,7 @@ class Codes(object):
EXCLUSIVE = "M_EXCLUSIVE"
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
THREEPID_IN_USE = "THREEPID_IN_USE"
INVALID_USERNAME = "M_INVALID_USERNAME"
class CodeMessageException(RuntimeError):
@@ -120,22 +122,6 @@ class AuthError(SynapseError):
super(AuthError, self).__init__(*args, **kwargs)
class GuestAccessError(AuthError):
"""An error raised when a there is a problem with a guest user accessing
a room"""
def __init__(self, rooms, *args, **kwargs):
self.rooms = rooms
super(GuestAccessError, self).__init__(*args, **kwargs)
def error_dict(self):
return cs_error(
self.msg,
self.errcode,
rooms=self.rooms,
)
class EventSizeError(SynapseError):
"""An error raised when an event is too big."""

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +15,8 @@
from synapse.api.errors import SynapseError
from synapse.types import UserID, RoomID
import ujson as json
class Filtering(object):
@@ -28,14 +30,14 @@ class Filtering(object):
return result
def add_user_filter(self, user_localpart, user_filter):
self._check_valid_filter(user_filter)
self.check_valid_filter(user_filter)
return self.store.add_user_filter(user_localpart, user_filter)
# TODO(paul): surely we should probably add a delete_user_filter or
# replace_user_filter at some point? There's no REST API specified for
# them however
def _check_valid_filter(self, user_filter_json):
def check_valid_filter(self, user_filter_json):
"""Check if the provided filter is valid.
This inspects all definitions contained within the filter.
@@ -129,88 +131,80 @@ class Filtering(object):
class FilterCollection(object):
def __init__(self, filter_json):
self.filter_json = filter_json
self._filter_json = filter_json
room_filter_json = self.filter_json.get("room", {})
room_filter_json = self._filter_json.get("room", {})
self.room_filter = Filter({
self._room_filter = Filter({
k: v for k, v in room_filter_json.items()
if k in ("rooms", "not_rooms")
})
self.room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
self.room_state_filter = Filter(room_filter_json.get("state", {}))
self.room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
self.room_account_data = Filter(room_filter_json.get("account_data", {}))
self.presence_filter = Filter(self.filter_json.get("presence", {}))
self.account_data = Filter(self.filter_json.get("account_data", {}))
self._room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
self._room_state_filter = Filter(room_filter_json.get("state", {}))
self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
self._room_account_data = Filter(room_filter_json.get("account_data", {}))
self._presence_filter = Filter(filter_json.get("presence", {}))
self._account_data = Filter(filter_json.get("account_data", {}))
self.include_leave = self.filter_json.get("room", {}).get(
self.include_leave = filter_json.get("room", {}).get(
"include_leave", False
)
def list_rooms(self):
return self.room_filter.list_rooms()
def __repr__(self):
return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
def get_filter_json(self):
return self._filter_json
def timeline_limit(self):
return self.room_timeline_filter.limit()
return self._room_timeline_filter.limit()
def presence_limit(self):
return self.presence_filter.limit()
return self._presence_filter.limit()
def ephemeral_limit(self):
return self.room_ephemeral_filter.limit()
return self._room_ephemeral_filter.limit()
def filter_presence(self, events):
return self.presence_filter.filter(events)
return self._presence_filter.filter(events)
def filter_account_data(self, events):
return self.account_data.filter(events)
return self._account_data.filter(events)
def filter_room_state(self, events):
return self.room_state_filter.filter(self.room_filter.filter(events))
return self._room_state_filter.filter(self._room_filter.filter(events))
def filter_room_timeline(self, events):
return self.room_timeline_filter.filter(self.room_filter.filter(events))
return self._room_timeline_filter.filter(self._room_filter.filter(events))
def filter_room_ephemeral(self, events):
return self.room_ephemeral_filter.filter(self.room_filter.filter(events))
return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
def filter_room_account_data(self, events):
return self.room_account_data.filter(self.room_filter.filter(events))
return self._room_account_data.filter(self._room_filter.filter(events))
class Filter(object):
def __init__(self, filter_json):
self.filter_json = filter_json
def list_rooms(self):
"""The list of room_id strings this filter restricts the output to
or None if the this filter doesn't list the room ids.
"""
if "rooms" in self.filter_json:
return list(set(self.filter_json["rooms"]))
else:
return None
def check(self, event):
"""Checks whether the filter matches the given event.
Returns:
bool: True if the event matches
"""
if isinstance(event, dict):
return self.check_fields(
event.get("room_id", None),
event.get("sender", None),
event.get("type", None),
)
else:
return self.check_fields(
getattr(event, "room_id", None),
getattr(event, "sender", None),
event.type,
)
sender = event.get("sender", None)
if not sender:
# Presence events have their 'sender' in content.user_id
sender = event.get("content", {}).get("user_id", None)
return self.check_fields(
event.get("room_id", None),
sender,
event.get("type", None),
)
def check_fields(self, room_id, sender, event_type):
"""Checks whether the filter matches the given event fields.
@@ -270,3 +264,6 @@ def _matches_wildcard(actual_value, filter_value):
return actual_value.startswith(type_prefix)
else:
return actual_value == filter_value
DEFAULT_FILTER_COLLECTION = FilterCollection({})
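For reference, a sketch (not from this changeset) of the filter JSON shape that ``FilterCollection`` consumes; every key is optional and all values below are illustrative::

    example_filter_json = {
        "room": {
            "rooms": ["!abc:example.com"],       # only these rooms...
            "not_rooms": [],                     # ...minus these
            "include_leave": False,
            "timeline": {"limit": 10},           # read by timeline_limit()
            "state": {"types": ["m.room.member"]},
            "ephemeral": {"limit": 5},           # read by ephemeral_limit()
            "account_data": {},
        },
        "presence": {"limit": 5},                # read by presence_limit()
        "account_data": {},
    }
    # FilterCollection(example_filter_json) exposes this via the filter_*
    # and *_limit methods above; FilterCollection({}) is the no-op
    # DEFAULT_FILTER_COLLECTION.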

View File

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,5 +23,6 @@ WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/v1"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
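For illustration (not from this changeset), the same media is now reachable under either prefix; the server name and media id below are placeholders::

    server_name = "example.com"     # placeholder
    media_id = "abcdefGHIJkl"       # placeholder

    for prefix in ("/_matrix/media/r0", "/_matrix/media/v1"):
        print("%s/download/%s/%s" % (prefix, server_name, media_id))
    # /_matrix/media/r0/download/example.com/abcdefGHIJkl
    # /_matrix/media/v1/download/example.com/abcdefGHIJkl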

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,3 +12,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.dont_write_bytecode = True
from synapse.python_dependencies import (
check_requirements, MissingRequirementError
) # NOQA
try:
check_requirements()
except MissingRequirementError as e:
message = "\n".join([
"Missing Requirement: %s" % (e.message,),
"To install run:",
" pip install --upgrade --force \"%s\"" % (e.dependency,),
"",
])
sys.stderr.writelines(message)
sys.exit(1)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,61 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from synapse.rest import ClientRestResource
sys.dont_write_bytecode = True
from synapse.python_dependencies import (
check_requirements, DEPENDENCY_LINKS, MissingRequirementError
)
if __name__ == '__main__':
try:
check_requirements()
except MissingRequirementError as e:
message = "\n".join([
"Missing Requirement: %s" % (e.message,),
"To install run:",
" pip install --upgrade --force \"%s\"" % (e.dependency,),
"",
])
sys.stderr.writelines(message)
sys.exit(1)
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import are_all_users_on_domain
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.server import HomeServer
from twisted.internet import reactor, task, defer
from twisted.application import service
from twisted.enterprise import adbapi
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.web.static import File
from twisted.web.server import Site, GzipEncoderFactory, Request
from synapse.http.server import JsonResource, RootRedirect
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.api.urls import (
FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
SERVER_KEY_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse import events
from daemonize import Daemonize
import twisted.manhole.telnet
import synapse
import contextlib
@@ -77,90 +22,94 @@ import os
import re
import resource
import subprocess
import sys
import time
from synapse.config._base import ConfigError
from synapse.python_dependencies import (
check_requirements, DEPENDENCY_LINKS
)
from synapse.rest import ClientRestResource
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import are_all_users_on_domain
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.server import HomeServer
from twisted.conch.manhole import ColoredManhole
from twisted.conch.insults import insults
from twisted.conch import manhole_ssh
from twisted.cred import checkers, portal
from twisted.internet import reactor, task, defer
from twisted.application import service
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.web.static import File
from twisted.web.server import Site, GzipEncoderFactory, Request
from synapse.http.server import RootRedirect
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.api.urls import (
FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.federation.transport.server import TransportLayerServer
from synapse import events
from daemonize import Daemonize
logger = logging.getLogger("synapse.app.homeserver")
ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
def gz_wrap(r):
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
def build_resource_for_web_client(hs):
webclient_path = hs.get_config().web_client_location
if not webclient_path:
try:
import syweb
except ImportError:
quit_with_error(
"Could not find a webclient.\n\n"
"Please either install the matrix-angular-sdk or configure\n"
"the location of the source to serve via the configuration\n"
"option `web_client_location`\n\n"
"To install the `matrix-angular-sdk` via pip, run:\n\n"
" pip install '%(dep)s'\n"
"\n"
"You can also disable hosting of the webclient via the\n"
"configuration option `web_client`\n"
% {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
)
syweb_path = os.path.dirname(syweb.__file__)
webclient_path = os.path.join(syweb_path, "webclient")
# GZip is disabled here due to
# https://twistedmatrix.com/trac/ticket/7678
# (It can stay enabled for the API resources: they call
# write() with the whole body and then finish() straight
# after and so do not trigger the bug.
# GzipFile was removed in commit 184ba09
# return GzipFile(webclient_path) # TODO configurable?
return File(webclient_path) # TODO configurable?
class SynapseHomeServer(HomeServer):
def build_http_client(self):
return MatrixFederationHttpClient(self)
def build_client_resource(self):
return ClientRestResource(self)
def build_resource_for_federation(self):
return JsonResource(self)
def build_resource_for_web_client(self):
webclient_path = self.get_config().web_client_location
if not webclient_path:
try:
import syweb
except ImportError:
quit_with_error(
"Could not find a webclient.\n\n"
"Please either install the matrix-angular-sdk or configure\n"
"the location of the source to serve via the configuration\n"
"option `web_client_location`\n\n"
"To install the `matrix-angular-sdk` via pip, run:\n\n"
" pip install '%(dep)s'\n"
"\n"
"You can also disable hosting of the webclient via the\n"
"configuration option `web_client`\n"
% {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
)
syweb_path = os.path.dirname(syweb.__file__)
webclient_path = os.path.join(syweb_path, "webclient")
# GZip is disabled here due to
# https://twistedmatrix.com/trac/ticket/7678
# (It can stay enabled for the API resources: they call
# write() with the whole body and then finish() straight
# after and so do not trigger the bug.
# GzipFile was removed in commit 184ba09
# return GzipFile(webclient_path) # TODO configurable?
return File(webclient_path) # TODO configurable?
def build_resource_for_static_content(self):
# This is old and should go away: not going to bother adding gzip
return File(
os.path.join(os.path.dirname(synapse.__file__), "static")
)
def build_resource_for_content_repo(self):
return ContentRepoResource(
self, self.config.uploads_path, self.auth, self.content_addr
)
def build_resource_for_media_repository(self):
return MediaRepositoryResource(self)
def build_resource_for_server_key(self):
return LocalKey(self)
def build_resource_for_server_key_v2(self):
return KeyApiV2Resource(self)
def build_resource_for_metrics(self):
if self.get_config().enable_metrics:
return MetricsResource(self)
else:
return None
def build_db_pool(self):
name = self.db_config["name"]
return adbapi.ConnectionPool(
name,
**self.db_config.get("args", {})
)
def _listener_http(self, config, listener_config):
port = listener_config["port"]
bind_address = listener_config.get("bind_address", "")
@@ -170,13 +119,11 @@ class SynapseHomeServer(HomeServer):
if tls and config.no_tls:
return
metrics_resource = self.get_resource_for_metrics()
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "client":
client_resource = self.get_client_resource()
client_resource = ClientRestResource(self)
if res["compress"]:
client_resource = gz_wrap(client_resource)
@@ -185,35 +132,42 @@ class SynapseHomeServer(HomeServer):
"/_matrix/client/r0": client_resource,
"/_matrix/client/unstable": client_resource,
"/_matrix/client/v2_alpha": client_resource,
"/_matrix/client/versions": client_resource,
})
if name == "federation":
resources.update({
FEDERATION_PREFIX: self.get_resource_for_federation(),
FEDERATION_PREFIX: TransportLayerServer(self),
})
if name in ["static", "client"]:
resources.update({
STATIC_PREFIX: self.get_resource_for_static_content(),
STATIC_PREFIX: File(
os.path.join(os.path.dirname(synapse.__file__), "static")
),
})
if name in ["media", "federation", "client"]:
media_repo = MediaRepositoryResource(self)
resources.update({
MEDIA_PREFIX: self.get_resource_for_media_repository(),
CONTENT_REPO_PREFIX: self.get_resource_for_content_repo(),
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
CONTENT_REPO_PREFIX: ContentRepoResource(
self, self.config.uploads_path, self.auth, self.content_addr
),
})
if name in ["keys", "federation"]:
resources.update({
SERVER_KEY_PREFIX: self.get_resource_for_server_key(),
SERVER_KEY_V2_PREFIX: self.get_resource_for_server_key_v2(),
SERVER_KEY_PREFIX: LocalKey(self),
SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
})
if name == "webclient":
resources[WEB_CLIENT_PREFIX] = self.get_resource_for_web_client()
resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
if name == "metrics" and metrics_resource:
resources[METRICS_PREFIX] = metrics_resource
if name == "metrics" and self.get_config().enable_metrics:
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources)
if tls:
@@ -248,10 +202,21 @@ class SynapseHomeServer(HomeServer):
if listener["type"] == "http":
self._listener_http(config, listener)
elif listener["type"] == "manhole":
f = twisted.manhole.telnet.ShellFactory()
f.username = "matrix"
f.password = "rabbithole"
f.namespace['hs'] = self
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
matrix="rabbithole"
)
rlm = manhole_ssh.TerminalRealm()
rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
ColoredManhole,
{
"__name__": "__console__",
"hs": self,
}
)
f = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))
reactor.listenTCP(
listener["port"],
f,
@@ -276,6 +241,18 @@ class SynapseHomeServer(HomeServer):
except IncorrectDatabaseSetup as e:
quit_with_error(e.message)
def get_db_conn(self):
# Any param beginning with cp_ is a parameter for adbapi, and should
# not be passed to the database engine.
db_params = {
k: v for k, v in self.db_config.get("args", {}).items()
if not k.startswith("cp_")
}
db_conn = self.database_engine.module.connect(**db_params)
self.database_engine.on_new_connection(db_conn)
return db_conn
def quit_with_error(error_string):
message_lines = error_string.split("\n")
@@ -358,10 +335,13 @@ def change_resource_limit(soft_file_no):
soft_file_no = hard
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
logger.info("Set file limit to: %d", soft_file_no)
resource.setrlimit(
resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
)
except (ValueError, resource.error) as e:
logger.warn("Failed to set file limit: %s", e)
logger.warn("Failed to set file or core limit: %s", e)
def setup(config_options):
@@ -373,11 +353,20 @@ def setup(config_options):
Returns:
HomeServer
"""
config = HomeServerConfig.load_config(
"Synapse Homeserver",
config_options,
generate_section="Homeserver"
)
try:
config = HomeServerConfig.load_config(
"Synapse Homeserver",
config_options,
generate_section="Homeserver"
)
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
if not config:
# If a config isn't returned, and an exception isn't raised, we're just
# generating config files and shouldn't try to continue.
sys.exit(0)
config.setup_logging()
@@ -409,13 +398,7 @@ def setup(config_options):
logger.info("Preparing database: %s...", config.database_config['name'])
try:
db_conn = database_engine.module.connect(
**{
k: v for k, v in config.database_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
db_conn = hs.get_db_conn()
database_engine.prepare_database(db_conn)
hs.run_startup_checks(db_conn, database_engine)
@@ -430,13 +413,17 @@ def setup(config_options):
logger.info("Database prepared in %s.", config.database_config['name'])
hs.setup()
hs.start_listening()
hs.get_pusherpool().start()
hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
hs.get_replication_layer().start_get_pdu_cache()
def start():
hs.get_pusherpool().start()
hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
hs.get_replication_layer().start_get_pdu_cache()
reactor.callWhenRunning(start)
return hs
@@ -475,9 +462,8 @@ class SynapseRequest(Request):
)
def get_redacted_uri(self):
return re.sub(
r'(\?.*access_token=)[^&]*(.*)$',
r'\1<redacted>\2',
return ACCESS_TOKEN_RE.sub(
r'\1<redacted>\3',
self.uri
)
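A quick sketch (not part of the diff) of what the precompiled ``ACCESS_TOKEN_RE`` substitution above does to a request URI; the URI and token are made up::

    import re

    ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')

    uri = "/_matrix/client/r0/sync?since=s72_4483&access_token=MDAxabc&timeout=30000"
    print(ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri))
    # /_matrix/client/r0/sync?since=s72_4483&access_token=<redacted>&timeout=30000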
@@ -653,7 +639,7 @@ def _resource_id(resource, path_seg):
the mapping should looks like _resource_id(A,C) = B.
Args:
resource (Resource): The *parent* Resource
resource (Resource): The *parent* Resourceb
path_seg (str): The name of the child Resource to be attached.
Returns:
str: A unique string which can be a key to the child Resource.
@@ -688,6 +674,7 @@ def run(hs):
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
uptime = int(now - start_time)
if uptime < 0:
@@ -699,8 +686,8 @@ def run(hs):
stats["uptime_seconds"] = uptime
stats["total_users"] = yield hs.get_datastore().count_all_users()
all_rooms = yield hs.get_datastore().get_rooms(False)
stats["total_room_count"] = len(all_rooms)
room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count
stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
daily_messages = yield hs.get_datastore().count_daily_messages()
@@ -718,9 +705,12 @@ def run(hs):
if hs.config.report_stats:
phone_home_task = task.LoopingCall(phone_stats_home)
logger.info("Scheduling stats reporting for 24 hour intervals")
phone_home_task.start(60 * 60 * 24, now=False)
def in_thread():
# Uncomment to enable tracing of log context changes.
# sys.settrace(logcontext_tracer)
with LoggingContext("run"):
change_resource_limit(hs.config.soft_file_limit)
reactor.run()

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ class ApplicationServiceApi(SimpleHttpClient):
pushing.
"""
def __init__(self, hs):
def __init__(self, hs):
super(ApplicationServiceApi, self).__init__(hs)
self.clock = hs.get_clock()

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import ConfigError
if __name__ == "__main__":
import sys
@@ -21,7 +22,11 @@ if __name__ == "__main__":
if action == "read":
key = sys.argv[2]
config = HomeServerConfig.load_config("", sys.argv[3:])
try:
config = HomeServerConfig.load_config("", sys.argv[3:])
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
print getattr(config, key)
sys.exit(0)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,6 @@ import argparse
import errno
import os
import yaml
import sys
from textwrap import dedent
@@ -136,13 +135,20 @@ class Config(object):
results.append(getattr(cls, name)(self, *args, **kargs))
return results
def generate_config(self, config_dir_path, server_name, report_stats=None):
def generate_config(
self,
config_dir_path,
server_name,
is_generating_file,
report_stats=None,
):
default_config = "# vim:ft=yaml\n"
default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
"default_config",
config_dir_path=config_dir_path,
server_name=server_name,
is_generating_file=is_generating_file,
report_stats=report_stats,
))
@@ -244,8 +250,10 @@ class Config(object):
server_name = config_args.server_name
if not server_name:
print "Must specify a server_name to a generate config for."
sys.exit(1)
raise ConfigError(
"Must specify a server_name to a generate config for."
" Pass -H server.name."
)
if not os.path.exists(config_dir_path):
os.makedirs(config_dir_path)
with open(config_path, "wb") as config_file:
@@ -253,6 +261,7 @@ class Config(object):
config_dir_path=config_dir_path,
server_name=server_name,
report_stats=(config_args.report_stats == "yes"),
is_generating_file=True
)
obj.invoke_all("generate_files", config)
config_file.write(config_bytes)
@@ -266,7 +275,7 @@ class Config(object):
"If this server name is incorrect, you will need to"
" regenerate the SSL certificates"
)
sys.exit(0)
return
else:
print (
"Config file %r already exists. Generating any missing key"
@@ -302,25 +311,25 @@ class Config(object):
specified_config.update(yaml_config)
if "server_name" not in specified_config:
sys.stderr.write("\n" + MISSING_SERVER_NAME + "\n")
sys.exit(1)
raise ConfigError(MISSING_SERVER_NAME)
server_name = specified_config["server_name"]
_, config = obj.generate_config(
config_dir_path=config_dir_path,
server_name=server_name
server_name=server_name,
is_generating_file=False,
)
config.pop("log_config")
config.update(specified_config)
if "report_stats" not in config:
sys.stderr.write(
"\n" + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
MISSING_REPORT_STATS_SPIEL + "\n")
sys.exit(1)
raise ConfigError(
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
MISSING_REPORT_STATS_SPIEL
)
if generate_keys:
obj.invoke_all("generate_files", config)
sys.exit(0)
return
obj.invoke_all("read_config", config)

View File

@@ -1,4 +1,4 @@
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,10 +29,10 @@ class CaptchaConfig(Config):
## Captcha ##
# This Home Server's ReCAPTCHA public key.
recaptcha_private_key: "YOUR_PRIVATE_KEY"
recaptcha_public_key: "YOUR_PUBLIC_KEY"
# This Home Server's ReCAPTCHA private key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,14 @@ from signedjson.key import (
read_signing_keys, write_signing_keys, NACL_ED25519
)
from unpaddedbase64 import decode_base64
from synapse.util.stringutils import random_string_with_symbols
import os
import hashlib
import logging
logger = logging.getLogger(__name__)
class KeyConfig(Config):
@@ -40,9 +46,29 @@ class KeyConfig(Config):
config["perspectives"]
)
def default_config(self, config_dir_path, server_name, **kwargs):
self.macaroon_secret_key = config.get(
"macaroon_secret_key", self.registration_shared_secret
)
if not self.macaroon_secret_key:
# Unfortunately, there are people out there that don't have this
# set. Lets just be "nice" and derive one from their secret key.
logger.warn("Config is missing missing macaroon_secret_key")
seed = self.signing_key[0].seed
self.macaroon_secret_key = hashlib.sha256(seed)
def default_config(self, config_dir_path, server_name, is_generating_file=False,
**kwargs):
base_key_name = os.path.join(config_dir_path, server_name)
if is_generating_file:
macaroon_secret_key = random_string_with_symbols(50)
else:
macaroon_secret_key = None
return """\
macaroon_secret_key: "%(macaroon_secret_key)s"
## Signing Keys ##
# Path to the signing key to sign messages with

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,22 +23,23 @@ from distutils.util import strtobool
class RegistrationConfig(Config):
def read_config(self, config):
self.disable_registration = not bool(
self.enable_registration = bool(
strtobool(str(config["enable_registration"]))
)
if "disable_registration" in config:
self.disable_registration = bool(
self.enable_registration = not bool(
strtobool(str(config["disable_registration"]))
)
self.registration_shared_secret = config.get("registration_shared_secret")
self.macaroon_secret_key = config.get("macaroon_secret_key")
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
self.allow_guest_access = config.get("allow_guest_access", False)
def default_config(self, **kwargs):
registration_shared_secret = random_string_with_symbols(50)
macaroon_secret_key = random_string_with_symbols(50)
return """\
## Registration ##
@@ -49,8 +50,6 @@ class RegistrationConfig(Config):
# secret, even if registration is otherwise disabled.
registration_shared_secret: "%(registration_shared_secret)s"
macaroon_secret_key: "%(macaroon_secret_key)s"
# Set the number of bcrypt rounds used to generate password hash.
# Larger numbers increase the work factor needed to generate the hash.
# The default number of rounds is 12.
@@ -60,6 +59,12 @@ class RegistrationConfig(Config):
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
allow_guest_access: False
# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
- matrix.org
- vector.im
""" % locals()
def add_arguments(self, parser):
@@ -71,6 +76,6 @@ class RegistrationConfig(Config):
def read_arguments(self, args):
if args.enable_registration is not None:
self.disable_registration = not bool(
self.enable_registration = bool(
strtobool(str(args.enable_registration))
)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -200,7 +200,7 @@ class ServerConfig(Config):
- names: [federation]
compress: false
# Turn on the twisted telnet manhole service on localhost on the given
# Turn on the twisted ssh manhole service on localhost on the given
# port.
# - port: 9000
# bind_address: 127.0.0.1

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,10 @@ from synapse.api.errors import SynapseError, Codes
from synapse.util.retryutils import get_retry_limiter
from synapse.util import unwrapFirstError
from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import (
preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
preserve_fn
)
from twisted.internet import defer
@@ -142,40 +146,43 @@ class Keyring(object):
for server_name, _ in server_and_json
}
# We want to wait for any previous lookups to complete before
# proceeding.
wait_on_deferred = self.wait_for_previous_lookups(
[server_name for server_name, _ in server_and_json],
server_to_deferred,
)
with PreserveLoggingContext():
# Actually start fetching keys.
wait_on_deferred.addBoth(
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
)
# We want to wait for any previous lookups to complete before
# proceeding.
wait_on_deferred = self.wait_for_previous_lookups(
[server_name for server_name, _ in server_and_json],
server_to_deferred,
)
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
server_to_gids = {}
# Actually start fetching keys.
wait_on_deferred.addBoth(
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
)
def remove_deferreds(res, server_name, group_id):
server_to_gids[server_name].discard(group_id)
if not server_to_gids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
server_to_gids = {}
for g_id, deferred in deferreds.items():
server_name = group_id_to_group[g_id].server_name
server_to_gids.setdefault(server_name, set()).add(g_id)
deferred.addBoth(remove_deferreds, server_name, g_id)
def remove_deferreds(res, server_name, group_id):
server_to_gids[server_name].discard(group_id)
if not server_to_gids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res
for g_id, deferred in deferreds.items():
server_name = group_id_to_group[g_id].server_name
server_to_gids.setdefault(server_name, set()).add(g_id)
deferred.addBoth(remove_deferreds, server_name, g_id)
# Pass those keys to handle_key_deferred so that the json object
# signatures can be verified
return [
handle_key_deferred(
preserve_context_over_fn(
handle_key_deferred,
group_id_to_group[g_id],
deferreds[g_id],
)
@@ -198,12 +205,13 @@ class Keyring(object):
if server_name in self.key_downloads
]
if wait_on:
yield defer.DeferredList(wait_on)
with PreserveLoggingContext():
yield defer.DeferredList(wait_on)
else:
break
for server_name, deferred in server_to_deferred.items():
d = ObservableDeferred(deferred)
d = ObservableDeferred(preserve_context_over_deferred(deferred))
self.key_downloads[server_name] = d
def rm(r, server_name):
@@ -244,12 +252,13 @@ class Keyring(object):
for group in group_id_to_group.values():
for key_id in group.key_ids:
if key_id in merged_results[group.server_name]:
group_id_to_deferred[group.group_id].callback((
group.group_id,
group.server_name,
key_id,
merged_results[group.server_name][key_id],
))
with PreserveLoggingContext():
group_id_to_deferred[group.group_id].callback((
group.group_id,
group.server_name,
key_id,
merged_results[group.server_name][key_id],
))
break
else:
missing_groups.setdefault(
@@ -504,7 +513,7 @@ class Keyring(object):
yield defer.gatherResults(
[
self.store_keys(
preserve_fn(self.store_keys)(
server_name=key_server_name,
from_server=server_name,
verify_keys=verify_keys,
@@ -573,7 +582,7 @@ class Keyring(object):
yield defer.gatherResults(
[
self.store.store_server_keys_json(
preserve_fn(self.store.store_server_keys_json)(
server_name=server_name,
key_id=key_id,
from_server=server_name,
@@ -675,7 +684,7 @@ class Keyring(object):
# TODO(markjh): Store whether the keys have expired.
yield defer.gatherResults(
[
self.store.store_server_verify_key(
preserve_fn(self.store.store_server_verify_key)(
server_name, server_name, key.time_added, key
)
for key_id, key in verify_keys.items()

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -117,6 +117,15 @@ class EventBase(object):
def __set__(self, instance, value):
raise AttributeError("Unrecognized attribute %s" % (instance,))
def __getitem__(self, field):
return self._event_dict[field]
def __contains__(self, field):
return field in self._event_dict
def items(self):
return self._event_dict.items()
class FrozenEvent(EventBase):
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,3 +20,4 @@ class EventContext(object):
self.current_state = current_state
self.state_group = None
self.rejected = False
self.push_actions = []

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,15 +17,10 @@
"""
from .replication import ReplicationLayer
from .transport import TransportLayer
from .transport.client import TransportLayerClient
def initialize_http_replication(homeserver):
transport = TransportLayer(
homeserver,
homeserver.hostname,
server=homeserver.get_resource_for_federation(),
client=homeserver.get_http_client()
)
transport = TransportLayerClient(homeserver)
return ReplicationLayer(homeserver, transport)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -57,7 +57,7 @@ class FederationClient(FederationBase):
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
expiry_ms=120*1000,
expiry_ms=120 * 1000,
reset_expiry_on_get=False,
)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -126,10 +126,8 @@ class FederationServer(FederationBase):
results = []
for pdu in pdu_list:
d = self._handle_new_pdu(transaction.origin, pdu)
try:
yield d
yield self._handle_new_pdu(transaction.origin, pdu)
results.append({})
except FederationError as e:
self.send_failure(e, transaction.origin)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -54,8 +54,6 @@ class ReplicationLayer(FederationClient, FederationServer):
self.keyring = hs.get_keyring()
self.transport_layer = transport_layer
self.transport_layer.register_received_handler(self)
self.transport_layer.register_request_handler(self)
self.federation_client = self

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -103,7 +103,6 @@ class TransactionQueue(object):
else:
return not destination.startswith("localhost")
@defer.inlineCallbacks
def enqueue_pdu(self, pdu, destinations, order):
# We loop through all destinations to see whether we already have
# a transaction in progress. If we do, stick it in the pending_pdus
@@ -141,8 +140,6 @@ class TransactionQueue(object):
deferreds.append(deferred)
yield defer.DeferredList(deferreds, consumeErrors=True)
# NO inlineCallbacks
def enqueue_edu(self, edu):
destination = edu.destination

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,55 +20,3 @@ By default this is done over HTTPS (and all home servers are required to
support HTTPS), however individual pairings of servers may decide to
communicate over a different (albeit still reliable) protocol.
"""
from .server import TransportLayerServer
from .client import TransportLayerClient
from synapse.util.ratelimitutils import FederationRateLimiter
class TransportLayer(TransportLayerServer, TransportLayerClient):
"""This is a basic implementation of the transport layer that translates
transactions and other requests to/from HTTP.
Attributes:
server_name (str): Local home server host
server (synapse.http.server.HttpServer): the http server to
register listeners on
client (synapse.http.client.HttpClient): the http client used to
send requests
request_handler (TransportRequestHandler): The handler to fire when we
receive requests for data.
received_handler (TransportReceivedHandler): The handler to fire when
we receive data.
"""
def __init__(self, homeserver, server_name, server, client):
"""
Args:
server_name (str): Local home server host
server (synapse.protocol.http.HttpServer): the http server to
register listeners on
client (synapse.protocol.http.HttpClient): the http client used to
send requests
"""
self.keyring = homeserver.get_keyring()
self.clock = homeserver.get_clock()
self.server_name = server_name
self.server = server
self.client = client
self.request_handler = None
self.received_handler = None
self.ratelimiter = FederationRateLimiter(
self.clock,
window_size=homeserver.config.federation_rc_window_size,
sleep_limit=homeserver.config.federation_rc_sleep_limit,
sleep_msec=homeserver.config.federation_rc_sleep_delay,
reject_limit=homeserver.config.federation_rc_reject_limit,
concurrent_requests=homeserver.config.federation_rc_concurrent,
)
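The removed constructor shows the federation rate limiter being built from the homeserver's federation_rc_* settings (the same construction reappears in TransportLayerServer further down). A sketch of reading those settings from a YAML config, assuming a flat homeserver.yaml layout; the default values here are illustrative, not authoritative.

```python
# Sketch: pull the federation_rc_* settings referenced above out of a YAML file.
import yaml

DEFAULTS = {
    "federation_rc_window_size": 1000,   # window size in ms (illustrative)
    "federation_rc_sleep_limit": 10,
    "federation_rc_sleep_delay": 500,    # ms
    "federation_rc_reject_limit": 50,
    "federation_rc_concurrent": 3,
}


def load_federation_rc(path):
    with open(path) as f:
        raw = yaml.safe_load(f) or {}
    # Fall back to the illustrative defaults for any key not present.
    return {key: raw.get(key, default) for key, default in DEFAULTS.items()}
```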

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,6 +28,10 @@ logger = logging.getLogger(__name__)
class TransportLayerClient(object):
"""Sends federation HTTP requests to other servers"""
def __init__(self, hs):
self.server_name = hs.hostname
self.client = hs.get_http_client()
@log_function
def get_room_state(self, destination, room_id, event_id):
""" Requests all state for a given room from the given server at the

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,8 @@ from twisted.internet import defer
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError
from synapse.util.logutils import log_function
from synapse.http.server import JsonResource
from synapse.util.ratelimitutils import FederationRateLimiter
import functools
import logging
@@ -28,9 +29,41 @@ import re
logger = logging.getLogger(__name__)
class TransportLayerServer(object):
class TransportLayerServer(JsonResource):
"""Handles incoming federation HTTP requests"""
def __init__(self, hs):
self.hs = hs
self.clock = hs.get_clock()
super(TransportLayerServer, self).__init__(hs)
self.authenticator = Authenticator(hs)
self.ratelimiter = FederationRateLimiter(
self.clock,
window_size=hs.config.federation_rc_window_size,
sleep_limit=hs.config.federation_rc_sleep_limit,
sleep_msec=hs.config.federation_rc_sleep_delay,
reject_limit=hs.config.federation_rc_reject_limit,
concurrent_requests=hs.config.federation_rc_concurrent,
)
self.register_servlets()
def register_servlets(self):
register_servlets(
self.hs,
resource=self,
ratelimiter=self.ratelimiter,
authenticator=self.authenticator,
)
class Authenticator(object):
def __init__(self, hs):
self.keyring = hs.get_keyring()
self.server_name = hs.hostname
# A method just so we can pass 'self' as the authenticator to the Servlets
@defer.inlineCallbacks
def authenticate_request(self, request):
@@ -98,37 +131,9 @@ class TransportLayerServer(object):
defer.returnValue((origin, content))
@log_function
def register_received_handler(self, handler):
""" Register a handler that will be fired when we receive data.
Args:
handler (TransportReceivedHandler)
"""
FederationSendServlet(
handler,
authenticator=self,
ratelimiter=self.ratelimiter,
server_name=self.server_name,
).register(self.server)
@log_function
def register_request_handler(self, handler):
""" Register a handler that will be fired when we get asked for data.
Args:
handler (TransportRequestHandler)
"""
for servletclass in SERVLET_CLASSES:
servletclass(
handler,
authenticator=self,
ratelimiter=self.ratelimiter,
).register(self.server)
class BaseFederationServlet(object):
def __init__(self, handler, authenticator, ratelimiter):
def __init__(self, handler, authenticator, ratelimiter, server_name):
self.handler = handler
self.authenticator = authenticator
self.ratelimiter = ratelimiter
@@ -172,7 +177,9 @@ class FederationSendServlet(BaseFederationServlet):
PATH = "/send/([^/]*)/"
def __init__(self, handler, server_name, **kwargs):
super(FederationSendServlet, self).__init__(handler, **kwargs)
super(FederationSendServlet, self).__init__(
handler, server_name=server_name, **kwargs
)
self.server_name = server_name
# This is when someone is trying to send us a bunch of data.
@@ -432,6 +439,7 @@ class On3pidBindServlet(BaseFederationServlet):
SERVLET_CLASSES = (
FederationSendServlet,
FederationPullServlet,
FederationEventServlet,
FederationStateServlet,
@@ -451,3 +459,13 @@ SERVLET_CLASSES = (
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
)
def register_servlets(hs, resource, authenticator, ratelimiter):
for servletclass in SERVLET_CLASSES:
servletclass(
handler=hs.get_replication_layer(),
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
).register(resource)
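register_servlets above replaces the old register_request_handler/register_received_handler pair: each servlet class knows its own PATH and attaches itself to a shared resource. A toy sketch of that pattern, with a stand-in resource rather than synapse's real JsonResource:

```python
# Toy version of the servlet-registration pattern shown in the diff.
import re


class FakeJsonResource(object):
    def __init__(self):
        self.routes = []

    def register_path(self, method, path_regex, callback):
        self.routes.append((method, path_regex, callback))


class BaseServlet(object):
    PATH = None

    def register(self, resource):
        pattern = re.compile("^" + self.PATH + "$")
        resource.register_path("PUT", pattern, self.on_PUT)

    def on_PUT(self, request, *groups):
        raise NotImplementedError


class SendServlet(BaseServlet):
    PATH = "/send/([^/]*)/"

    def on_PUT(self, request, txn_id):
        return 200, {}


SERVLETS = (SendServlet,)


def register_servlets(resource):
    for servlet_class in SERVLETS:
        servlet_class().register(resource)


resource = FakeJsonResource()
register_servlets(resource)
assert len(resource.routes) == 1
```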

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -19,6 +19,7 @@ from synapse.api.errors import LimitExceededError, SynapseError, AuthError
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.api.constants import Membership, EventTypes
from synapse.types import UserID, RoomAlias
from synapse.push.action_generator import ActionGenerator
from synapse.util.logcontext import PreserveLoggingContext
@@ -52,21 +53,51 @@ class BaseHandler(object):
self.event_builder_factory = hs.get_event_builder_factory()
@defer.inlineCallbacks
def _filter_events_for_client(self, user_id, events, is_guest=False):
# Assumes that user has at some point joined the room if not is_guest.
def _filter_events_for_clients(self, user_tuples, events, event_id_to_state):
""" Returns dict of user_id -> list of events that user is allowed to
see.
"""
forgotten = yield defer.gatherResults([
self.store.who_forgot_in_room(
room_id,
)
for room_id in frozenset(e.room_id for e in events)
], consumeErrors=True)
# Set of membership event_ids that have been forgotten
event_id_forgotten = frozenset(
row["event_id"] for rows in forgotten for row in rows
)
def allowed(event, user_id, is_peeking):
state = event_id_to_state[event.event_id]
visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
if visibility_event:
visibility = visibility_event.content.get("history_visibility", "shared")
else:
visibility = "shared"
def allowed(event, membership, visibility):
if visibility == "world_readable":
return True
if is_guest:
if is_peeking:
return False
membership_event = state.get((EventTypes.Member, user_id), None)
if membership_event:
if membership_event.event_id in event_id_forgotten:
membership = None
else:
membership = membership_event.membership
else:
membership = None
if membership == Membership.JOIN:
return True
if event.type == EventTypes.RoomHistoryVisibility:
return not is_guest
return not is_peeking
if visibility == "shared":
return True
@@ -77,43 +108,30 @@ class BaseHandler(object):
return True
defer.returnValue({
user_id: [
event
for event in events
if allowed(event, user_id, is_peeking)
]
for user_id, is_peeking in user_tuples
})
@defer.inlineCallbacks
def _filter_events_for_client(self, user_id, events, is_peeking=False):
# Assumes that user has at some point joined the room if not is_guest.
types = (
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
)
event_id_to_state = yield self.store.get_state_for_events(
frozenset(e.event_id for e in events),
types=(
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
)
types=types
)
events_to_return = []
for event in events:
state = event_id_to_state[event.event_id]
membership_event = state.get((EventTypes.Member, user_id), None)
if membership_event:
was_forgotten_at_event = yield self.store.was_forgotten_at(
membership_event.state_key,
membership_event.room_id,
membership_event.event_id
)
if was_forgotten_at_event:
membership = None
else:
membership = membership_event.membership
else:
membership = None
visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
if visibility_event:
visibility = visibility_event.content.get("history_visibility", "shared")
else:
visibility = "shared"
should_include = allowed(event, membership, visibility)
if should_include:
events_to_return.append(event)
defer.returnValue(events_to_return)
res = yield self._filter_events_for_clients(
[(user_id, is_peeking)], events, event_id_to_state
)
defer.returnValue(res.get(user_id, []))
def ratelimit(self, user_id):
time_now = self.clock.time()
@@ -124,7 +142,7 @@ class BaseHandler(object):
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000*(time_allowed - time_now)),
retry_after_ms=int(1000 * (time_allowed - time_now)),
)
@defer.inlineCallbacks
@@ -170,12 +188,10 @@ class BaseHandler(object):
)
@defer.inlineCallbacks
def handle_new_client_event(self, event, context, extra_destinations=[],
extra_users=[], suppress_auth=False):
def handle_new_client_event(self, event, context, extra_users=[]):
# We now need to go and hit out to wherever we need to hit out to.
if not suppress_auth:
self.auth.check(event, auth_events=context.current_state)
self.auth.check(event, auth_events=context.current_state)
yield self.maybe_kick_guest_users(event, context.current_state.values())
@@ -248,11 +264,16 @@ class BaseHandler(object):
"You don't have permission to redact events"
)
action_generator = ActionGenerator(self.hs)
yield action_generator.handle_push_actions_for_event(
event, context, self
)
(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
)
destinations = set(extra_destinations)
destinations = set()
for k, s in context.current_state.items():
try:
if k[0] == EventTypes.Member:
@@ -267,19 +288,11 @@ class BaseHandler(object):
with PreserveLoggingContext():
# Don't block waiting on waking up all the listeners.
notify_d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
notify_d.addErrback(log_failure)
# If invite, remove room_state from unsigned before sending.
event.unsigned.pop("invite_room_state", None)
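The refactor above folds the per-user visibility rules into a single allowed() check driven by history_visibility, membership and whether the caller is only peeking. A simplified standalone sketch of that decision, covering only the cases visible in the hunk (the real handler covers more history_visibility values and forgotten rooms):

```python
# Simplified visibility decision, modelled on the allowed() logic above.
JOIN = "join"


def allowed(visibility, membership, is_peeking):
    if visibility == "world_readable":
        return True          # anyone may see it, even when just peeking
    if is_peeking:
        return False         # peekers only get world_readable history
    if membership == JOIN:
        return True          # joined users see everything else
    return visibility == "shared"


def filter_events_for_clients(user_tuples, events, state_of):
    """Return {user_id: [events the user may see]} (sketch).

    state_of(event, user_id) is assumed to return a dict with the room's
    "visibility" and the user's "membership" at that event.
    """
    result = {}
    for user_id, is_peeking in user_tuples:
        visible = []
        for ev in events:
            state = state_of(ev, user_id)
            if allowed(state["visibility"], state["membership"], is_peeking):
                visible.append(ev)
        result[user_id] = visible
    return result
```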

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -408,7 +408,7 @@ class AuthHandler(BaseHandler):
macaroon = pymacaroons.Macaroon.deserialize(login_token)
auth_api = self.hs.get_auth()
auth_api.validate_macaroon(macaroon, "login", True)
return self._get_user_from_macaroon(macaroon)
return self.get_user_from_macaroon(macaroon)
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN)
@@ -421,7 +421,7 @@ class AuthHandler(BaseHandler):
macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
return macaroon
def _get_user_from_macaroon(self, macaroon):
def get_user_from_macaroon(self, macaroon):
user_prefix = "user_id = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(user_prefix):
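get_user_from_macaroon (now public) recovers the user ID by scanning first-party caveats for the "user_id = " prefix. A hedged sketch of the same scan using pymacaroons directly; the location, key and extra caveat are placeholders rather than synapse's actual values.

```python
# Sketch of building a login macaroon and reading the user_id caveat back.
import pymacaroons


def make_login_macaroon(user_id, secret_key):
    macaroon = pymacaroons.Macaroon(
        location="example.com",   # placeholder server name
        identifier="key",
        key=secret_key,
    )
    macaroon.add_first_party_caveat("gen = 1")
    macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
    return macaroon


def user_from_macaroon(macaroon):
    user_prefix = "user_id = "
    for caveat in macaroon.caveats:
        if caveat.caveat_id.startswith(user_prefix):
            return caveat.caveat_id[len(user_prefix):]
    return None


m = make_login_macaroon("@alice:example.com", "a secret")
assert user_from_macaroon(m) == "@alice:example.com"
```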

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -175,8 +175,8 @@ class DirectoryHandler(BaseHandler):
# If this server is in the list of servers, return it first.
if self.server_name in servers:
servers = (
[self.server_name]
+ [s for s in servers if s != self.server_name]
[self.server_name] +
[s for s in servers if s != self.server_name]
)
else:
servers = list(servers)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@ from twisted.internet import defer
from synapse.util.logutils import log_function
from synapse.types import UserID
from synapse.events.utils import serialize_event
from synapse.util.logcontext import preserve_context_over_fn
from ._base import BaseHandler
@@ -29,15 +30,17 @@ logger = logging.getLogger(__name__)
def started_user_eventstream(distributor, user):
return distributor.fire("started_user_eventstream", user)
return preserve_context_over_fn(
distributor.fire,
"started_user_eventstream", user
)
def stopped_user_eventstream(distributor, user):
return distributor.fire("stopped_user_eventstream", user)
def user_joined_room(distributor, user, room_id):
return distributor.fire("user_joined_room", user, room_id)
return preserve_context_over_fn(
distributor.fire,
"stopped_user_eventstream", user
)
class EventStreamHandler(BaseHandler):
@@ -117,10 +120,10 @@ class EventStreamHandler(BaseHandler):
@log_function
def get_stream(self, auth_user_id, pagin_config, timeout=0,
as_client_event=True, affect_presence=True,
only_room_events=False, room_id=None, is_guest=False):
only_keys=None, room_id=None, is_guest=False):
"""Fetches the events stream for a given user.
If `only_room_events` is `True` only room events will be returned.
If `only_keys` is not None, events from keys will be sent down.
"""
auth_user = UserID.from_string(auth_user_id)
@@ -134,15 +137,12 @@ class EventStreamHandler(BaseHandler):
# Add some randomness to this value to try and mitigate against
# thundering herds on restart.
timeout = random.randint(int(timeout*0.9), int(timeout*1.1))
if is_guest:
yield user_joined_room(self.distributor, auth_user, room_id)
timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1))
events, tokens = yield self.notifier.get_events_for(
auth_user, pagin_config, timeout,
only_room_events=only_room_events,
is_guest=is_guest, guest_room_id=room_id
only_keys=only_keys,
is_guest=is_guest, explicit_room_id=room_id
)
time_now = self.clock.time_msec()
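The get_stream change above keeps the ±10% timeout jitter that spreads reconnecting clients out after a restart, so every waiting /events request does not expire and re-poll at the same instant. The same idea as a standalone helper:

```python
# Jitter a long-poll timeout by +/-10% to avoid thundering herds on restart.
import random


def jitter_timeout(timeout_ms, spread=0.1):
    if timeout_ms <= 0:
        return timeout_ms
    low = int(timeout_ms * (1 - spread))
    high = int(timeout_ms * (1 + spread))
    return random.randint(low, high)


# e.g. a 30s long-poll becomes something between 27s and 33s
print(jitter_timeout(30000))
```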

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,6 +36,8 @@ from synapse.events.utils import prune_event
from synapse.util.retryutils import NotRetryingDestination
from synapse.push.action_generator import ActionGenerator
from twisted.internet import defer
import itertools
@@ -219,19 +221,11 @@ class FederationHandler(BaseHandler):
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
prev_state = context.current_state.get((event.type, event.state_key))
@@ -635,19 +629,11 @@ class FederationHandler(BaseHandler):
)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[joinee]
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
logger.debug("Finished joining %s to %s", joinee, room_id)
finally:
room_queue = self.room_queues[room_id]
@@ -722,18 +708,10 @@ class FederationHandler(BaseHandler):
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.JOIN:
user = UserID.from_string(event.state_key)
@@ -803,19 +781,11 @@ class FederationHandler(BaseHandler):
target_user = UserID.from_string(event.state_key)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[target_user],
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
defer.returnValue(event)
@defer.inlineCallbacks
@@ -940,18 +910,10 @@ class FederationHandler(BaseHandler):
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
new_pdu = event
destinations = set()
@@ -1105,6 +1067,12 @@ class FederationHandler(BaseHandler):
auth_events=auth_events,
)
if not backfilled and not event.internal_metadata.is_outlier():
action_generator = ActionGenerator(self.hs)
yield action_generator.handle_push_actions_for_event(
event, context, self
)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
@@ -1178,7 +1146,13 @@ class FederationHandler(BaseHandler):
try:
self.auth.check(e, auth_events=auth_for_e)
except AuthError as err:
except SynapseError as err:
# we may get SynapseErrors here as well as AuthErrors. For
# instance, there are a couple of (ancient) events in some
# rooms whose senders do not have the correct sigil; these
# cause SynapseErrors in auth.check. We don't want to give up
# the attempt to federate altogether in such cases.
logger.warn(
"Rejecting %s because %s",
e.event_id, err.msg
@@ -1684,7 +1658,7 @@ class FederationHandler(BaseHandler):
self.auth.check(event, context.current_state)
yield self._validate_keyserver(event, auth_events=context.current_state)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
yield member_handler.send_membership_event(event, context)
else:
destinations = set([x.split(":", 1)[-1] for x in (sender, room_id)])
yield self.replication_layer.forward_third_party_invite(
@@ -1713,7 +1687,7 @@ class FederationHandler(BaseHandler):
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
yield member_handler.send_membership_event(event, context)
@defer.inlineCallbacks
def add_display_name_to_third_party_invite(self, event_dict, event, context):

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,14 +36,15 @@ class IdentityHandler(BaseHandler):
self.http_client = hs.get_simple_http_client()
self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
yield run_on_reactor()
# XXX: make this configurable!
# trustedIdServers = ['matrix.org', 'localhost:8090']
trustedIdServers = ['matrix.org', 'vector.im']
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
@@ -58,10 +59,19 @@ class IdentityHandler(BaseHandler):
else:
raise SynapseError(400, "No client_secret in creds")
if id_server not in trustedIdServers:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server)
defer.returnValue(None)
if id_server not in self.trusted_id_servers:
if self.trust_any_id_server_just_for_testing_do_not_use:
logger.warn(
"Trusting untrustworthy ID server %r even though it isn't"
" in the trusted id list for testing because"
" 'use_insecure_ssl_client_just_for_testing_do_not_use'"
" is set in the config",
id_server,
)
else:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server)
defer.returnValue(None)
data = {}
try:
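The threepid_from_creds change above replaces the hardcoded trustedIdServers list with the configured trusted_third_party_id_servers, plus a loudly named testing-only override. A standalone sketch of that trust decision; the names mirror the diff and the logger is plain stdlib logging.

```python
# Sketch: accept a 3pid lookup only from an allow-listed identity server,
# unless the testing-only override is enabled.
import logging

logger = logging.getLogger(__name__)


def is_trusted_id_server(id_server, trusted_id_servers, allow_any_for_testing=False):
    if id_server in trusted_id_servers:
        return True
    if allow_any_for_testing:
        logger.warn("Trusting untrusted ID server %r for testing only", id_server)
        return True
    logger.warn("%s is not a trusted ID server: rejecting 3pid credentials", id_server)
    return False


trusted = {"matrix.org", "vector.im"}
assert is_trusted_id_server("matrix.org", trusted)
assert not is_trusted_id_server("evil.example", trusted)
```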

View File

@@ -16,7 +16,7 @@
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError, AuthError, Codes
from synapse.api.errors import AuthError, Codes
from synapse.streams.config import PaginationConfig
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
@@ -78,21 +78,20 @@ class MessageHandler(BaseHandler):
defer.returnValue(None)
@defer.inlineCallbacks
def get_messages(self, user_id=None, room_id=None, pagin_config=None,
as_client_event=True, is_guest=False):
def get_messages(self, requester, room_id=None, pagin_config=None,
as_client_event=True):
"""Get messages in a room.
Args:
user_id (str): The user requesting messages.
requester (Requester): The user requesting messages.
room_id (str): The room they want messages from.
pagin_config (synapse.api.streams.PaginationConfig): The pagination
config rules to apply, if any.
as_client_event (bool): True to get events in client-server format.
is_guest (bool): Whether the requesting user is a guest (as opposed
to a fully registered user).
Returns:
dict: Pagination API results
"""
user_id = requester.user.to_string()
data_source = self.hs.get_event_sources().sources["room"]
if pagin_config.from_token:
@@ -106,8 +105,6 @@ class MessageHandler(BaseHandler):
room_token = pagin_config.from_token.room_key
room_token = RoomStreamToken.parse(room_token)
if room_token.topological is None:
raise SynapseError(400, "Invalid token")
pagin_config.from_token = pagin_config.from_token.copy_and_replace(
"room_key", str(room_token)
@@ -115,36 +112,37 @@ class MessageHandler(BaseHandler):
source_config = pagin_config.get_source_config("room")
if not is_guest:
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
if member_event.membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room.
# If they're a guest, we'll just 403 them if they're asking for
# events they can't see.
leave_token = yield self.store.get_topological_token_for_event(
member_event.event_id
)
leave_token = RoomStreamToken.parse(leave_token)
if leave_token.topological < room_token.topological:
source_config.from_key = str(leave_token)
if source_config.direction == "f":
if source_config.to_key is None:
source_config.to_key = str(leave_token)
else:
to_token = RoomStreamToken.parse(source_config.to_key)
if leave_token.topological < to_token.topological:
source_config.to_key = str(leave_token)
yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, room_token.topological
membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id
)
user = UserID.from_string(user_id)
if source_config.direction == 'b':
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
max_topo = room_token.topological
else:
max_topo = yield self.store.get_max_topological_token_for_stream_and_room(
room_id, room_token.stream
)
if membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
leave_token = yield self.store.get_topological_token_for_event(
member_event_id
)
leave_token = RoomStreamToken.parse(leave_token)
if leave_token.topological < max_topo:
source_config.from_key = str(leave_token)
yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, max_topo
)
events, next_key = yield data_source.get_pagination_rows(
user, source_config, room_id
requester.user, source_config, room_id
)
next_token = pagin_config.from_token.copy_and_replace(
@@ -158,7 +156,11 @@ class MessageHandler(BaseHandler):
"end": next_token.to_string(),
})
events = yield self._filter_events_for_client(user_id, events, is_guest=is_guest)
events = yield self._filter_events_for_client(
user_id,
events,
is_peeking=(member_event_id is None),
)
time_now = self.clock.time_msec()
@@ -174,30 +176,25 @@ class MessageHandler(BaseHandler):
defer.returnValue(chunk)
@defer.inlineCallbacks
def create_and_send_event(self, event_dict, ratelimit=True,
token_id=None, txn_id=None, is_guest=False):
""" Given a dict from a client, create and handle a new event.
def create_event(self, event_dict, token_id=None, txn_id=None):
"""
Given a dict from a client, create a new event.
Creates a FrozenEvent object, filling out auth_events, prev_events,
etc.
Adds display names to Join membership events.
Persists and notifies local clients and federation.
Args:
event_dict (dict): An entire event
Returns:
Tuple of created event (FrozenEvent), Context
"""
builder = self.event_builder_factory.new(event_dict)
self.validator.validate_new(builder)
if ratelimit:
self.ratelimit(builder.user_id)
# TODO(paul): Why does 'event' not have a 'user' object?
user = UserID.from_string(builder.user_id)
assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
if membership == Membership.JOIN:
@@ -216,6 +213,25 @@ class MessageHandler(BaseHandler):
event, context = yield self._create_new_client_event(
builder=builder,
)
defer.returnValue((event, context))
@defer.inlineCallbacks
def send_event(self, event, context, ratelimit=True, is_guest=False):
"""
Persists and notifies local clients and federation of an event.
Args:
event (FrozenEvent) the event to send.
context (Context) the context of the event.
ratelimit (bool): Whether to rate limit this send.
is_guest (bool): Whether the sender is a guest.
"""
user = UserID.from_string(event.sender)
assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
if ratelimit:
self.ratelimit(event.sender)
if event.is_state():
prev_state = context.current_state.get((event.type, event.state_key))
@@ -229,7 +245,7 @@ class MessageHandler(BaseHandler):
if event.type == EventTypes.Member:
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context, is_guest=is_guest)
yield member_handler.send_membership_event(event, context, is_guest=is_guest)
else:
yield self.handle_new_client_event(
event=event,
@@ -241,6 +257,25 @@ class MessageHandler(BaseHandler):
with PreserveLoggingContext():
presence.bump_presence_active_time(user)
@defer.inlineCallbacks
def create_and_send_event(self, event_dict, ratelimit=True,
token_id=None, txn_id=None, is_guest=False):
"""
Creates an event, then sends it.
See self.create_event and self.send_event.
"""
event, context = yield self.create_event(
event_dict,
token_id=token_id,
txn_id=txn_id
)
yield self.send_event(
event,
context,
ratelimit=ratelimit,
is_guest=is_guest
)
defer.returnValue(event)
@defer.inlineCallbacks
@@ -256,7 +291,7 @@ class MessageHandler(BaseHandler):
SynapseError if something went wrong.
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id, is_guest
room_id, user_id
)
if membership == Membership.JOIN:
@@ -273,7 +308,7 @@ class MessageHandler(BaseHandler):
defer.returnValue(data)
@defer.inlineCallbacks
def _check_in_room_or_world_readable(self, room_id, user_id, is_guest):
def _check_in_room_or_world_readable(self, room_id, user_id):
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
@@ -283,7 +318,7 @@ class MessageHandler(BaseHandler):
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
return
except AuthError, auth_error:
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
@@ -293,8 +328,6 @@ class MessageHandler(BaseHandler):
):
defer.returnValue((Membership.JOIN, None))
return
if not is_guest:
raise auth_error
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@@ -312,7 +345,7 @@ class MessageHandler(BaseHandler):
A list of dicts representing state events. [{}, {}, {}]
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id, is_guest
room_id, user_id
)
if membership == Membership.JOIN:
@@ -523,13 +556,13 @@ class MessageHandler(BaseHandler):
defer.returnValue(ret)
@defer.inlineCallbacks
def room_initial_sync(self, user_id, room_id, pagin_config=None, is_guest=False):
def room_initial_sync(self, requester, room_id, pagin_config=None):
"""Capture the a snapshot of a room. If user is currently a member of
the room this will be what is currently in the room. If the user left
the room this will be what was in the room when they left.
Args:
user_id(str): The user to get a snapshot for.
requester(Requester): The user to get a snapshot for.
room_id(str): The room to get a snapshot of.
pagin_config(synapse.streams.config.PaginationConfig):
The pagination config used to determine how many messages to
@@ -540,19 +573,20 @@ class MessageHandler(BaseHandler):
A JSON serialisable dict with the snapshot of the room.
"""
user_id = requester.user.to_string()
membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id,
user_id,
is_guest
room_id, user_id,
)
is_peeking = member_event_id is None
if membership == Membership.JOIN:
result = yield self._room_initial_sync_joined(
user_id, room_id, pagin_config, membership, is_guest
user_id, room_id, pagin_config, membership, is_peeking
)
elif membership == Membership.LEAVE:
result = yield self._room_initial_sync_parted(
user_id, room_id, pagin_config, membership, member_event_id, is_guest
user_id, room_id, pagin_config, membership, member_event_id, is_peeking
)
account_data_events = []
@@ -576,7 +610,7 @@ class MessageHandler(BaseHandler):
@defer.inlineCallbacks
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
membership, member_event_id, is_guest):
membership, member_event_id, is_peeking):
room_state = yield self.store.get_state_for_events(
[member_event_id], None
)
@@ -598,7 +632,7 @@ class MessageHandler(BaseHandler):
)
messages = yield self._filter_events_for_client(
user_id, messages, is_guest=is_guest
user_id, messages, is_peeking=is_peeking
)
start_token = StreamToken(token[0], 0, 0, 0, 0)
@@ -621,7 +655,7 @@ class MessageHandler(BaseHandler):
@defer.inlineCallbacks
def _room_initial_sync_joined(self, user_id, room_id, pagin_config,
membership, is_guest):
membership, is_peeking):
current_state = yield self.state.get_current_state(
room_id=room_id,
)
@@ -685,7 +719,7 @@ class MessageHandler(BaseHandler):
).addErrback(unwrapFirstError)
messages = yield self._filter_events_for_client(
user_id, messages, is_guest=is_guest,
user_id, messages, is_peeking=is_peeking,
)
start_token = now_token.copy_and_replace("room_key", token[0])
@@ -704,7 +738,7 @@ class MessageHandler(BaseHandler):
"presence": presence,
"receipts": receipts,
}
if not is_guest:
if not is_peeking:
ret["membership"] = membership
defer.returnValue(ret)
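get_messages now derives max_topo and, for users who have left the room, clamps backwards pagination to the leave event so nothing later is loaded from the database. A toy sketch of the clamp, with integer topological positions standing in for real stream tokens:

```python
# Toy version of the leave-token clamp used when paginating backwards.
def clamp_backwards_pagination(from_topo, leave_topo, has_left):
    """Return the topological position pagination should actually start from."""
    if has_left and leave_topo < from_topo:
        # The user left before the requested start point: start at the leave
        # event instead, so nothing they aren't allowed to see is loaded.
        return leave_topo
    return from_topo


assert clamp_backwards_pagination(from_topo=120, leave_topo=80, has_left=True) == 80
assert clamp_backwards_pagination(from_topo=50, leave_topo=80, has_left=True) == 50
assert clamp_backwards_pagination(from_topo=120, leave_topo=80, has_left=False) == 120
```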

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -34,7 +34,7 @@ metrics = synapse.metrics.get_metrics_for(__name__)
# Don't bother bumping "last active" time if it differs by less than 60 seconds
LAST_ACTIVE_GRANULARITY = 60*1000
LAST_ACTIVE_GRANULARITY = 60 * 1000
# Keep no more than this number of offline serial revisions
MAX_OFFLINE_SERIALS = 1000
@@ -378,9 +378,9 @@ class PresenceHandler(BaseHandler):
was_polling = target_user in self._user_cachemap
if now_online and not was_polling:
self.start_polling_presence(target_user, state=state)
yield self.start_polling_presence(target_user, state=state)
elif not now_online and was_polling:
self.stop_polling_presence(target_user)
yield self.stop_polling_presence(target_user)
# TODO(paul): perform a presence push as part of start/stop poll so
# we don't have to do this all the time
@@ -394,7 +394,8 @@ class PresenceHandler(BaseHandler):
if now - prev_state.state.get("last_active", 0) < LAST_ACTIVE_GRANULARITY:
return
self.changed_presencelike_data(user, {"last_active": now})
with PreserveLoggingContext():
self.changed_presencelike_data(user, {"last_active": now})
def get_joined_rooms_for_user(self, user):
"""Get the list of rooms a user is joined to.
@@ -466,11 +467,12 @@ class PresenceHandler(BaseHandler):
local_user, room_ids=[room_id], add_to_cache=False
)
self.push_update_to_local_and_remote(
observed_user=local_user,
users_to_push=[user],
statuscache=statuscache,
)
with PreserveLoggingContext():
self.push_update_to_local_and_remote(
observed_user=local_user,
users_to_push=[user],
statuscache=statuscache,
)
@defer.inlineCallbacks
def send_presence_invite(self, observer_user, observed_user):
@@ -556,7 +558,7 @@ class PresenceHandler(BaseHandler):
observer_user.localpart, observed_user.to_string()
)
self.start_polling_presence(
yield self.start_polling_presence(
observer_user, target_user=observed_user
)
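LAST_ACTIVE_GRANULARITY above means a "last active" bump is skipped entirely if the previous one was under a minute ago, so a chatty client does not trigger a storm of tiny presence updates. The check in isolation:

```python
# Skip the presence write if the user was already marked active recently.
LAST_ACTIVE_GRANULARITY = 60 * 1000  # ms


def should_bump_last_active(now_ms, last_active_ms):
    return now_ms - last_active_ms >= LAST_ACTIVE_GRANULARITY


assert not should_bump_last_active(now_ms=100000, last_active_ms=70000)
assert should_bump_last_active(now_ms=200000, last_active_ms=70000)
```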

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@ from synapse.api.errors import (
AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
)
from ._base import BaseHandler
import synapse.util.stringutils as stringutils
from synapse.util.async import run_on_reactor
from synapse.http.client import CaptchaServerHttpClient
@@ -40,19 +39,22 @@ class RegistrationHandler(BaseHandler):
def __init__(self, hs):
super(RegistrationHandler, self).__init__(hs)
self.auth = hs.get_auth()
self.distributor = hs.get_distributor()
self.distributor.declare("registered_user")
self.captcha_client = CaptchaServerHttpClient(hs)
self._next_generated_user_id = None
@defer.inlineCallbacks
def check_username(self, localpart):
def check_username(self, localpart, guest_access_token=None):
yield run_on_reactor()
if urllib.quote(localpart) != localpart:
if urllib.quote(localpart.encode('utf-8')) != localpart:
raise SynapseError(
400,
"User ID must only contain characters which do not"
" require URL encoding."
"User ID can only contain characters a-z, 0-9, or '_-./'",
Codes.INVALID_USERNAME
)
user = UserID(localpart, self.hs.hostname)
@@ -62,19 +64,35 @@ class RegistrationHandler(BaseHandler):
users = yield self.store.get_users_by_id_case_insensitive(user_id)
if users:
raise SynapseError(
400,
"User ID already taken.",
errcode=Codes.USER_IN_USE,
)
if not guest_access_token:
raise SynapseError(
400,
"User ID already taken.",
errcode=Codes.USER_IN_USE,
)
user_data = yield self.auth.get_user_from_macaroon(guest_access_token)
if not user_data["is_guest"] or user_data["user"].localpart != localpart:
raise AuthError(
403,
"Cannot register taken user ID without valid guest "
"credentials for that user.",
errcode=Codes.FORBIDDEN,
)
@defer.inlineCallbacks
def register(self, localpart=None, password=None, generate_token=True):
def register(
self,
localpart=None,
password=None,
generate_token=True,
guest_access_token=None,
make_guest=False
):
"""Registers a new client on the server.
Args:
localpart : The local part of the user ID to register. If None,
one will be randomly generated.
one will be generated.
password (str) : The password to assign to this user so they can
login again. This can be None which means they cannot login again
via a password (e.g. the user is an application service user).
@@ -89,7 +107,19 @@ class RegistrationHandler(BaseHandler):
password_hash = self.auth_handler().hash(password)
if localpart:
yield self.check_username(localpart)
yield self.check_username(localpart, guest_access_token=guest_access_token)
was_guest = guest_access_token is not None
if not was_guest:
try:
int(localpart)
raise RegistrationError(
400,
"Numeric user IDs are reserved for guest users."
)
except ValueError:
pass
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
@@ -100,37 +130,37 @@ class RegistrationHandler(BaseHandler):
yield self.store.register(
user_id=user_id,
token=token,
password_hash=password_hash
password_hash=password_hash,
was_guest=was_guest,
make_guest=make_guest,
)
yield registered_user(self.distributor, user)
else:
# autogen a random user ID
# autogen a sequential user ID
attempts = 0
user_id = None
token = None
while not user_id:
user = None
while not user:
localpart = yield self._generate_user_id(attempts > 0)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
if generate_token:
token = self.auth_handler().generate_access_token(user_id)
try:
localpart = self._generate_user_id()
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
if generate_token:
token = self.auth_handler().generate_access_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
password_hash=password_hash)
yield registered_user(self.distributor, user)
password_hash=password_hash,
make_guest=make_guest
)
except SynapseError:
# if user id is taken, just generate another
user_id = None
token = None
attempts += 1
if attempts > 5:
raise RegistrationError(
500, "Cannot generate user ID.")
yield registered_user(self.distributor, user)
# We used to generate default identicons here, but nowadays
# we want clients to generate their own as part of their branding
@@ -156,7 +186,7 @@ class RegistrationHandler(BaseHandler):
token=token,
password_hash=""
)
registered_user(self.distributor, user)
yield registered_user(self.distributor, user)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
@@ -192,7 +222,7 @@ class RegistrationHandler(BaseHandler):
400,
"User ID must only contain characters which do not"
" require URL encoding."
)
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
@@ -262,8 +292,16 @@ class RegistrationHandler(BaseHandler):
errcode=Codes.EXCLUSIVE
)
def _generate_user_id(self):
return "-" + stringutils.random_string(18)
@defer.inlineCallbacks
def _generate_user_id(self, reseed=False):
if reseed or self._next_generated_user_id is None:
self._next_generated_user_id = (
yield self.store.find_next_generated_user_id_localpart()
)
id = self._next_generated_user_id
self._next_generated_user_id += 1
defer.returnValue(str(id))
@defer.inlineCallbacks
def _validate_captcha(self, ip_addr, private_key, challenge, response):
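_generate_user_id now hands out sequential numeric localparts, seeding (and reseeding after a collision) from the store's find_next_generated_user_id_localpart. An in-memory sketch with a plain counter standing in for the database:

```python
# In-memory stand-in for sequential generated user IDs.
class UserIdAllocator(object):
    def __init__(self, next_id=1):
        self._next_id = next_id

    def generate(self, reseed_to=None):
        if reseed_to is not None:
            # In the real handler this re-reads the next free ID from the store.
            self._next_id = reseed_to
        localpart = str(self._next_id)
        self._next_id += 1
        return localpart


alloc = UserIdAllocator(next_id=42)
assert alloc.generate() == "42"
assert alloc.generate() == "43"
```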

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,13 +18,14 @@ from twisted.internet import defer
from ._base import BaseHandler
from synapse.types import UserID, RoomAlias, RoomID
from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
from synapse.api.constants import (
EventTypes, Membership, JoinRules, RoomCreationPreset,
)
from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.api.errors import AuthError, StoreError, SynapseError, Codes
from synapse.util import stringutils, unwrapFirstError
from synapse.util.async import run_on_reactor
from synapse.util.logcontext import preserve_context_over_fn
from signedjson.sign import verify_signed_json
from signedjson.key import decode_verify_key_bytes
@@ -46,11 +47,17 @@ def collect_presencelike_data(distributor, user, content):
def user_left_room(distributor, user, room_id):
return distributor.fire("user_left_room", user=user, room_id=room_id)
return preserve_context_over_fn(
distributor.fire,
"user_left_room", user=user, room_id=room_id
)
def user_joined_room(distributor, user, room_id):
return distributor.fire("user_joined_room", user=user, room_id=room_id)
return preserve_context_over_fn(
distributor.fire,
"user_joined_room", user=user, room_id=room_id
)
class RoomCreationHandler(BaseHandler):
@@ -115,6 +122,8 @@ class RoomCreationHandler(BaseHandler):
except:
raise SynapseError(400, "Invalid user_id: %s" % (i,))
invite_3pid_list = config.get("invite_3pid", [])
is_public = config.get("visibility", None) == "public"
if room_id:
@@ -220,6 +229,20 @@ class RoomCreationHandler(BaseHandler):
"content": {"membership": Membership.INVITE},
}, ratelimit=False)
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
address = invite_3pid["address"]
medium = invite_3pid["medium"]
yield self.hs.get_handlers().room_member_handler.do_3pid_invite(
room_id,
user,
medium,
address,
id_server,
token_id=None,
txn_id=None,
)
result = {"room_id": room_id}
if room_alias:
@@ -381,7 +404,58 @@ class RoomMemberHandler(BaseHandler):
remotedomains.add(member.domain)
@defer.inlineCallbacks
def change_membership(self, event, context, do_auth=True, is_guest=False):
def update_membership(self, requester, target, room_id, action, txn_id=None):
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
elif action == "forget":
effective_membership_state = "leave"
msg_handler = self.hs.get_handlers().message_handler
content = {"membership": unicode(effective_membership_state)}
if requester.is_guest:
content["kind"] = "guest"
event, context = yield msg_handler.create_event(
{
"type": EventTypes.Member,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
"state_key": target.to_string(),
},
token_id=requester.access_token_id,
txn_id=txn_id,
)
old_state = context.current_state.get((EventTypes.Member, event.state_key))
old_membership = old_state.content.get("membership") if old_state else None
if action == "unban" and old_membership != "ban":
raise SynapseError(
403,
"Cannot unban user who was not banned (membership=%s)" % old_membership,
errcode=Codes.BAD_STATE
)
if old_membership == "ban" and action != "unban":
raise SynapseError(
403,
"Cannot %s user who was is banned" % (action,),
errcode=Codes.BAD_STATE
)
yield msg_handler.send_event(
event,
context,
ratelimit=True,
is_guest=requester.is_guest
)
if action == "forget":
yield self.forget(requester.user, room_id)
@defer.inlineCallbacks
def send_membership_event(self, event, context, is_guest=False):
""" Change the membership status of a user in a room.
Args:
@@ -416,7 +490,7 @@ class RoomMemberHandler(BaseHandler):
if not is_guest_access_allowed:
raise AuthError(403, "Guest access not allowed")
yield self._do_join(event, context, do_auth=do_auth)
yield self._do_join(event, context)
else:
if event.membership == Membership.LEAVE:
is_host_in_room = yield self.is_host_in_room(room_id, context)
@@ -443,9 +517,7 @@ class RoomMemberHandler(BaseHandler):
yield self._do_local_membership_update(
event,
membership=event.content["membership"],
context=context,
do_auth=do_auth,
)
if prev_state and prev_state.membership == Membership.JOIN:
@@ -481,12 +553,12 @@ class RoomMemberHandler(BaseHandler):
})
event, context = yield self._create_new_client_event(builder)
yield self._do_join(event, context, room_hosts=hosts, do_auth=True)
yield self._do_join(event, context, room_hosts=hosts)
defer.returnValue({"room_id": room_id})
@defer.inlineCallbacks
def _do_join(self, event, context, room_hosts=None, do_auth=True):
def _do_join(self, event, context, room_hosts=None):
room_id = event.room_id
# XXX: We don't do an auth check if we are doing an invite
@@ -520,9 +592,7 @@ class RoomMemberHandler(BaseHandler):
yield self._do_local_membership_update(
event,
membership=event.content["membership"],
context=context,
do_auth=do_auth,
)
prev_state = context.current_state.get((event.type, event.state_key))
@@ -587,8 +657,7 @@ class RoomMemberHandler(BaseHandler):
defer.returnValue(room_ids)
@defer.inlineCallbacks
def _do_local_membership_update(self, event, membership, context,
do_auth):
def _do_local_membership_update(self, event, context):
yield run_on_reactor()
target_user = UserID.from_string(event.state_key)
@@ -597,7 +666,6 @@ class RoomMemberHandler(BaseHandler):
event,
context,
extra_users=[target_user],
suppress_auth=(not do_auth),
)
@defer.inlineCallbacks
@@ -815,39 +883,71 @@ class RoomListHandler(BaseHandler):
@defer.inlineCallbacks
def get_public_room_list(self):
chunk = yield self.store.get_rooms(is_public=True)
room_ids = yield self.store.get_public_room_ids()
room_members = yield defer.gatherResults(
[
self.store.get_users_in_room(room["room_id"])
for room in chunk
],
consumeErrors=True,
).addErrback(unwrapFirstError)
@defer.inlineCallbacks
def handle_room(room_id):
aliases = yield self.store.get_aliases_for_room(room_id)
if not aliases:
defer.returnValue(None)
avatar_urls = yield defer.gatherResults(
[
self.get_room_avatar_url(room["room_id"])
for room in chunk
],
consumeErrors=True,
).addErrback(unwrapFirstError)
state = yield self.state_handler.get_current_state(room_id)
for i, room in enumerate(chunk):
room["num_joined_members"] = len(room_members[i])
if avatar_urls[i]:
room["avatar_url"] = avatar_urls[i]
result = {"aliases": aliases, "room_id": room_id}
name_event = state.get((EventTypes.Name, ""), None)
if name_event:
name = name_event.content.get("name", None)
if name:
result["name"] = name
topic_event = state.get((EventTypes.Topic, ""), None)
if topic_event:
topic = topic_event.content.get("topic", None)
if topic:
result["topic"] = topic
canonical_event = state.get((EventTypes.CanonicalAlias, ""), None)
if canonical_event:
canonical_alias = canonical_event.content.get("alias", None)
if canonical_alias:
result["canonical_alias"] = canonical_alias
visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
visibility = None
if visibility_event:
visibility = visibility_event.content.get("history_visibility", None)
result["world_readable"] = visibility == "world_readable"
guest_event = state.get((EventTypes.GuestAccess, ""), None)
guest = None
if guest_event:
guest = guest_event.content.get("guest_access", None)
result["guest_can_join"] = guest == "can_join"
avatar_event = state.get(("m.room.avatar", ""), None)
if avatar_event:
avatar_url = avatar_event.content.get("url", None)
if avatar_url:
result["avatar_url"] = avatar_url
result["num_joined_members"] = sum(
1 for (event_type, _), ev in state.items()
if event_type == EventTypes.Member and ev.membership == Membership.JOIN
)
defer.returnValue(result)
result = []
for chunk in (room_ids[i:i + 10] for i in xrange(0, len(room_ids), 10)):
chunk_result = yield defer.gatherResults([
handle_room(room_id)
for room_id in chunk
], consumeErrors=True).addErrback(unwrapFirstError)
result.extend(v for v in chunk_result if v)
# FIXME (erikj): START is no longer a valid value
defer.returnValue({"start": "START", "end": "END", "chunk": chunk})
@defer.inlineCallbacks
def get_room_avatar_url(self, room_id):
event = yield self.hs.get_state_handler().get_current_state(
room_id, "m.room.avatar"
)
if event and "url" in event.content:
defer.returnValue(event.content["url"])
defer.returnValue({"start": "START", "end": "END", "chunk": result})
class RoomContextHandler(BaseHandler):
@@ -866,7 +966,7 @@ class RoomContextHandler(BaseHandler):
Returns:
dict, or None if the event isn't found
"""
before_limit = math.floor(limit/2.)
before_limit = math.floor(limit / 2.)
after_limit = limit - before_limit
now_token = yield self.hs.get_event_sources().get_current_token()
@@ -875,7 +975,7 @@ class RoomContextHandler(BaseHandler):
return self._filter_events_for_client(
user.to_string(),
events,
is_guest=is_guest)
is_peeking=is_guest)
event = yield self.store.get_event(event_id, get_prev_content=True,
allow_none=True)
@@ -936,6 +1036,11 @@ class RoomEventSource(object):
to_key = yield self.get_current_key()
from_token = RoomStreamToken.parse(from_key)
if from_token.topological:
logger.warn("Stream has topological part!!!! %r", from_key)
from_key = "s%s" % (from_token.stream,)
app_service = yield self.store.get_app_service_by_user_id(
user.to_string()
)
@@ -947,15 +1052,31 @@ class RoomEventSource(object):
limit=limit,
)
else:
events, end_key = yield self.store.get_room_events_stream(
user_id=user.to_string(),
room_events = yield self.store.get_membership_changes_for_user(
user.to_string(), from_key, to_key
)
room_to_events = yield self.store.get_room_events_stream_for_rooms(
room_ids=room_ids,
from_key=from_key,
to_key=to_key,
limit=limit,
room_ids=room_ids,
is_guest=is_guest,
limit=limit or 10,
order='ASC',
)
events = list(room_events)
events.extend(e for evs, _ in room_to_events.values() for e in evs)
events.sort(key=lambda e: e.internal_metadata.order)
if limit:
events[:] = events[:limit]
if events:
end_key = events[-1].internal_metadata.after
else:
end_key = to_key
defer.returnValue((events, end_key))
def get_current_key(self, direction='f'):
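
The new get_new_events path above merges the user's membership-change events with the per-room event streams, orders everything by stream position, and truncates to the requested limit. A standalone sketch of that merge, using a stand-in Event type rather than Synapse's event objects:

from collections import namedtuple

Event = namedtuple("Event", ["event_id", "order"])

def merge_room_events(membership_events, room_to_events, limit=None):
    events = list(membership_events)
    events.extend(e for evs in room_to_events.values() for e in evs)
    events.sort(key=lambda e: e.order)
    if limit:
        events = events[:limit]
    return events

if __name__ == "__main__":
    member = [Event("$join", 3)]
    per_room = {"!a": [Event("$m1", 1), Event("$m2", 5)],
                "!b": [Event("$m3", 2)]}
    print([e.event_id for e in merge_room_events(member, per_room, limit=3)])
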

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

File diff suppressed because it is too large.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ from ._base import BaseHandler
from synapse.api.errors import SynapseError, AuthError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.metrics import Measure
from synapse.types import UserID
import logging
@@ -222,6 +223,7 @@ class TypingNotificationHandler(BaseHandler):
class TypingNotificationEventSource(object):
def __init__(self, hs):
self.hs = hs
self.clock = hs.get_clock()
self._handler = None
self._room_member_handler = None
@@ -247,19 +249,20 @@ class TypingNotificationEventSource(object):
}
def get_new_events(self, from_key, room_ids, **kwargs):
from_key = int(from_key)
handler = self.handler()
with Measure(self.clock, "typing.get_new_events"):
from_key = int(from_key)
handler = self.handler()
events = []
for room_id in room_ids:
if room_id not in handler._room_serials:
continue
if handler._room_serials[room_id] <= from_key:
continue
events = []
for room_id in room_ids:
if room_id not in handler._room_serials:
continue
if handler._room_serials[room_id] <= from_key:
continue
events.append(self._make_event_for(room_id))
events.append(self._make_event_for(room_id))
return events, handler._latest_room_serial
return events, handler._latest_room_serial
def get_current_key(self):
return self.handler()._latest_room_serial
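
The typing hunk above wraps get_new_events in synapse.util.metrics.Measure. A minimal stand-in for that pattern, assuming only that Measure times a named block (the real helper does more bookkeeping than wall-clock time); room_serials and latest_serial are illustrative parameters:

import time
from contextlib import contextmanager

@contextmanager
def measure(name, sink):
    start = time.monotonic()
    try:
        yield
    finally:
        sink.setdefault(name, []).append(time.monotonic() - start)

timings = {}

def get_new_events(room_serials, from_key, room_ids, latest_serial):
    with measure("typing.get_new_events", timings):
        from_key = int(from_key)
        events = [
            room_id for room_id in room_ids
            if room_serials.get(room_id, 0) > from_key
        ]
        return events, latest_serial

if __name__ == "__main__":
    print(get_new_events({"!a": 5, "!b": 1}, "2", ["!a", "!b"], 5))
    print(timings)
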

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
from twisted.internet import defer
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError
from twisted.names.error import DNSNameError, DomainError
import collections
import logging
@@ -27,6 +27,14 @@ import random
logger = logging.getLogger(__name__)
SERVER_CACHE = {}
_Server = collections.namedtuple(
"_Server", "priority weight host port"
)
def matrix_federation_endpoint(reactor, destination, ssl_context_factory=None,
timeout=None):
"""Construct an endpoint for the given matrix destination.
@@ -73,10 +81,6 @@ class SRVClientEndpoint(object):
Implements twisted.internet.interfaces.IStreamClientEndpoint.
"""
_Server = collections.namedtuple(
"_Server", "priority weight host port"
)
def __init__(self, reactor, service, domain, protocol="tcp",
default_port=None, endpoint=TCP4ClientEndpoint,
endpoint_kw_args={}):
@@ -84,7 +88,7 @@ class SRVClientEndpoint(object):
self.service_name = "_%s._%s.%s" % (service, protocol, domain)
if default_port is not None:
self.default_server = self._Server(
self.default_server = _Server(
host=domain,
port=default_port,
priority=0,
@@ -101,32 +105,8 @@ class SRVClientEndpoint(object):
@defer.inlineCallbacks
def fetch_servers(self):
try:
answers, auth, add = yield client.lookupService(self.service_name)
except DNSNameError:
answers = []
if (len(answers) == 1
and answers[0].type == dns.SRV
and answers[0].payload
and answers[0].payload.target == dns.Name('.')):
raise ConnectError("Service %s unavailable", self.service_name)
self.servers = []
self.used_servers = []
for answer in answers:
if answer.type != dns.SRV or not answer.payload:
continue
payload = answer.payload
self.servers.append(self._Server(
host=str(payload.target),
port=int(payload.port),
priority=int(payload.priority),
weight=int(payload.weight)
))
self.servers.sort()
self.servers = yield resolve_service(self.service_name)
def pick_server(self):
if not self.servers:
@@ -170,3 +150,64 @@ class SRVClientEndpoint(object):
)
connection = yield endpoint.connect(protocolFactory)
defer.returnValue(connection)
@defer.inlineCallbacks
def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE):
servers = []
try:
try:
answers, _, _ = yield dns_client.lookupService(service_name)
except DNSNameError:
defer.returnValue([])
if (len(answers) == 1
and answers[0].type == dns.SRV
and answers[0].payload
and answers[0].payload.target == dns.Name('.')):
raise ConnectError("Service %s unavailable", service_name)
for answer in answers:
if answer.type != dns.SRV or not answer.payload:
continue
payload = answer.payload
host = str(payload.target)
try:
answers, _, _ = yield dns_client.lookupAddress(host)
except DNSNameError:
continue
ips = [
answer.payload.dottedQuad()
for answer in answers
if answer.type == dns.A and answer.payload
]
for ip in ips:
servers.append(_Server(
host=ip,
port=int(payload.port),
priority=int(payload.priority),
weight=int(payload.weight)
))
servers.sort()
cache[service_name] = list(servers)
except DomainError as e:
# We failed to resolve the name (other than a NameError)
# Try something in the cache, else re-raise
cache_entry = cache.get(service_name, None)
if cache_entry:
logger.warn(
"Failed to resolve %r, falling back to cache. %r",
service_name, e
)
servers = list(cache_entry)
else:
raise e
defer.returnValue(servers)
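
The new resolve_service above looks up the SRV record, resolves each target to its A records, caches the result, and falls back to the last cached answer if the lookup itself fails. Below is a synchronous sketch of that control flow with the DNS lookups injected as callables, so it runs without real DNS; lookup_srv and lookup_a are illustrative stand-ins for Twisted's resolver.

from collections import namedtuple

Server = namedtuple("Server", "priority weight host port")
SERVER_CACHE = {}

def resolve_service(service_name, lookup_srv, lookup_a, cache=SERVER_CACHE):
    try:
        servers = []
        for target, port, priority, weight in lookup_srv(service_name):
            for ip in lookup_a(target):
                servers.append(Server(priority, weight, ip, port))
        servers.sort()  # namedtuple ordering: priority first, then weight
        cache[service_name] = list(servers)
        return servers
    except Exception:
        # Lookup failed outright: fall back to the last cached answer, if any.
        cached = cache.get(service_name)
        if cached:
            return list(cached)
        raise

if __name__ == "__main__":
    def srv(name):
        return [("host1.example.org", 8448, 10, 5)]

    def a_records(host):
        return ["198.51.100.17"]

    def failing(name):
        raise RuntimeError("dns down")

    print(resolve_service("_matrix._tcp.example.org", srv, a_records))
    # Second call fails to resolve but is served from the cache.
    print(resolve_service("_matrix._tcp.example.org", failing, a_records))
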

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -152,7 +152,7 @@ class MatrixFederationHttpClient(object):
return self.clock.time_bound_deferred(
request_deferred,
time_out=timeout/1000. if timeout else 60,
time_out=timeout / 1000. if timeout else 60,
)
response = yield preserve_context_over_fn(

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@ metrics = synapse.metrics.get_metrics_for(__name__)
incoming_requests_counter = metrics.register_counter(
"requests",
labels=["method", "servlet"],
labels=["method", "servlet", "tag"],
)
outgoing_responses_counter = metrics.register_counter(
"responses",
@@ -50,23 +50,23 @@ outgoing_responses_counter = metrics.register_counter(
response_timer = metrics.register_distribution(
"response_time",
labels=["method", "servlet"]
labels=["method", "servlet", "tag"]
)
response_ru_utime = metrics.register_distribution(
"response_ru_utime", labels=["method", "servlet"]
"response_ru_utime", labels=["method", "servlet", "tag"]
)
response_ru_stime = metrics.register_distribution(
"response_ru_stime", labels=["method", "servlet"]
"response_ru_stime", labels=["method", "servlet", "tag"]
)
response_db_txn_count = metrics.register_distribution(
"response_db_txn_count", labels=["method", "servlet"]
"response_db_txn_count", labels=["method", "servlet", "tag"]
)
response_db_txn_duration = metrics.register_distribution(
"response_db_txn_duration", labels=["method", "servlet"]
"response_db_txn_duration", labels=["method", "servlet", "tag"]
)
@@ -99,9 +99,8 @@ def request_handler(request_handler):
request_context.request = request_id
with request.processing():
try:
d = request_handler(self, request)
with PreserveLoggingContext():
yield d
with PreserveLoggingContext(request_context):
yield request_handler(self, request)
except CodeMessageException as e:
code = e.code
if isinstance(e, SynapseError):
@@ -208,6 +207,9 @@ class JsonResource(HttpServer, resource.Resource):
if request.method == "OPTIONS":
self._send_response(request, 200, {})
return
start_context = LoggingContext.current_context()
# Loop through all the registered callbacks to check if the method
# and path regex match
for path_entry in self.path_regexs.get(request.method, []):
@@ -226,7 +228,6 @@ class JsonResource(HttpServer, resource.Resource):
servlet_classname = servlet_instance.__class__.__name__
else:
servlet_classname = "%r" % callback
incoming_requests_counter.inc(request.method, servlet_classname)
args = [
urllib.unquote(u).decode("UTF-8") if u else u for u in m.groups()
@@ -237,21 +238,40 @@ class JsonResource(HttpServer, resource.Resource):
code, response = callback_return
self._send_response(request, code, response)
response_timer.inc_by(
self.clock.time_msec() - start, request.method, servlet_classname
)
try:
context = LoggingContext.current_context()
tag = ""
if context:
tag = context.tag
if context != start_context:
logger.warn(
"Context have unexpectedly changed %r, %r",
context, self.start_context
)
return
incoming_requests_counter.inc(request.method, servlet_classname, tag)
response_timer.inc_by(
self.clock.time_msec() - start, request.method,
servlet_classname, tag
)
ru_utime, ru_stime = context.get_resource_usage()
response_ru_utime.inc_by(ru_utime, request.method, servlet_classname)
response_ru_stime.inc_by(ru_stime, request.method, servlet_classname)
response_ru_utime.inc_by(
ru_utime, request.method, servlet_classname, tag
)
response_ru_stime.inc_by(
ru_stime, request.method, servlet_classname, tag
)
response_db_txn_count.inc_by(
context.db_txn_count, request.method, servlet_classname
context.db_txn_count, request.method, servlet_classname, tag
)
response_db_txn_duration.inc_by(
context.db_txn_duration, request.method, servlet_classname
context.db_txn_duration, request.method, servlet_classname, tag
)
except:
pass
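
The hunks above add a "tag" label to every request metric so per-request tags taken from the logging context show up in the stats. A dependency-free sketch of a counter keyed by (method, servlet, tag); the LabelledCounter class here is illustrative and not the synapse.metrics registration API.

from collections import defaultdict

class LabelledCounter(object):
    def __init__(self, name, labels):
        self.name = name
        self.labels = tuple(labels)
        self.values = defaultdict(float)

    def inc_by(self, amount, *label_values):
        assert len(label_values) == len(self.labels)
        self.values[label_values] += amount

    def inc(self, *label_values):
        self.inc_by(1, *label_values)

incoming_requests = LabelledCounter("requests", ["method", "servlet", "tag"])
response_timer = LabelledCounter("response_time", ["method", "servlet", "tag"])

if __name__ == "__main__":
    incoming_requests.inc("GET", "RoomStateRestServlet", "initial_sync")
    response_timer.inc_by(42.0, "GET", "RoomStateRestServlet", "initial_sync")
    print(dict(incoming_requests.values))
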

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -18,10 +18,13 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor, ObservableDeferred
from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import StreamToken
import synapse.metrics
from collections import namedtuple
import logging
@@ -63,15 +66,16 @@ class _NotifierUserStream(object):
so that it can remove itself from the indexes in the Notifier class.
"""
def __init__(self, user, rooms, current_token, time_now_ms,
def __init__(self, user_id, rooms, current_token, time_now_ms,
appservice=None):
self.user = str(user)
self.user_id = user_id
self.appservice = appservice
self.rooms = set(rooms)
self.current_token = current_token
self.last_notified_ms = time_now_ms
self.notify_deferred = ObservableDeferred(defer.Deferred())
with PreserveLoggingContext():
self.notify_deferred = ObservableDeferred(defer.Deferred())
def notify(self, stream_key, stream_id, time_now_ms):
"""Notify any listeners for this user of a new event from an
@@ -86,8 +90,10 @@ class _NotifierUserStream(object):
)
self.last_notified_ms = time_now_ms
noify_deferred = self.notify_deferred
self.notify_deferred = ObservableDeferred(defer.Deferred())
noify_deferred.callback(self.current_token)
with PreserveLoggingContext():
self.notify_deferred = ObservableDeferred(defer.Deferred())
noify_deferred.callback(self.current_token)
def remove(self, notifier):
""" Remove this listener from all the indexes in the Notifier
@@ -98,7 +104,7 @@ class _NotifierUserStream(object):
lst = notifier.room_to_user_streams.get(room, set())
lst.discard(self)
notifier.user_to_user_stream.pop(self.user)
notifier.user_to_user_stream.pop(self.user_id)
if self.appservice:
notifier.appservice_to_user_streams.get(
@@ -118,6 +124,11 @@ class _NotifierUserStream(object):
return _NotificationListener(self.notify_deferred.observe())
class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
def __nonzero__(self):
return bool(self.events)
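
EventStreamResult above is truthy only when it actually carries events, which lets the wait loop keep polling on an "empty but valid" result. A small self-contained sketch of the same class; the Python 3 hook is __bool__, with __nonzero__ aliased for Python 2 as in the diff.

from collections import namedtuple

class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
    def __bool__(self):
        return bool(self.events)
    __nonzero__ = __bool__  # Python 2 spelling used in the original

if __name__ == "__main__":
    empty = EventStreamResult([], ("s1", "s1"))
    full = EventStreamResult(["$ev"], ("s1", "s2"))
    print(bool(empty), bool(full))  # False True
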
class Notifier(object):
""" This class is responsible for notifying any listeners when there are
new events available for it.
@@ -177,8 +188,6 @@ class Notifier(object):
lambda: count(bool, self.appservice_to_user_streams.values()),
)
@log_function
@defer.inlineCallbacks
def on_new_room_event(self, event, room_stream_id, max_room_stream_id,
extra_users=[]):
""" Used by handlers to inform the notifier something has happened
@@ -192,12 +201,11 @@ class Notifier(object):
until all previous events have been persisted before notifying
the client streams.
"""
yield run_on_reactor()
self.pending_new_room_events.append((
room_stream_id, event, extra_users
))
self._notify_pending_new_room_events(max_room_stream_id)
with PreserveLoggingContext():
self.pending_new_room_events.append((
room_stream_id, event, extra_users
))
self._notify_pending_new_room_events(max_room_stream_id)
def _notify_pending_new_room_events(self, max_room_stream_id):
"""Notify for the room events that were queued waiting for a previous
@@ -244,48 +252,45 @@ class Notifier(object):
extra_streams=app_streams,
)
@defer.inlineCallbacks
@log_function
def on_new_event(self, stream_key, new_token, users=[], rooms=[],
extra_streams=set()):
""" Used to inform listeners that something has happend event wise.
Will wake up all listeners for the given users and rooms.
"""
yield run_on_reactor()
user_streams = set()
with PreserveLoggingContext():
user_streams = set()
for user in users:
user_stream = self.user_to_user_stream.get(str(user))
if user_stream is not None:
user_streams.add(user_stream)
for user in users:
user_stream = self.user_to_user_stream.get(str(user))
if user_stream is not None:
user_streams.add(user_stream)
for room in rooms:
user_streams |= self.room_to_user_streams.get(room, set())
for room in rooms:
user_streams |= self.room_to_user_streams.get(room, set())
time_now_ms = self.clock.time_msec()
for user_stream in user_streams:
try:
user_stream.notify(stream_key, new_token, time_now_ms)
except:
logger.exception("Failed to notify listener")
time_now_ms = self.clock.time_msec()
for user_stream in user_streams:
try:
user_stream.notify(stream_key, new_token, time_now_ms)
except:
logger.exception("Failed to notify listener")
@defer.inlineCallbacks
def wait_for_events(self, user, timeout, callback, room_ids=None,
def wait_for_events(self, user_id, timeout, callback, room_ids=None,
from_token=StreamToken("s0", "0", "0", "0", "0")):
"""Wait until the callback returns a non empty response or the
timeout fires.
"""
user = str(user)
user_stream = self.user_to_user_stream.get(user)
user_stream = self.user_to_user_stream.get(user_id)
if user_stream is None:
appservice = yield self.store.get_app_service_by_user_id(user)
appservice = yield self.store.get_app_service_by_user_id(user_id)
current_token = yield self.event_sources.get_current_token()
if room_ids is None:
rooms = yield self.store.get_rooms_for_user(user)
rooms = yield self.store.get_rooms_for_user(user_id)
room_ids = [room.room_id for room in rooms]
user_stream = _NotifierUserStream(
user=user,
user_id=user_id,
rooms=room_ids,
appservice=appservice,
current_token=current_token,
@@ -302,7 +307,7 @@ class Notifier(object):
def timed_out():
if listener:
listener.deferred.cancel()
timer = self.clock.call_later(timeout/1000., timed_out)
timer = self.clock.call_later(timeout / 1000., timed_out)
prev_token = from_token
while not result:
@@ -319,7 +324,8 @@ class Notifier(object):
# that we don't miss any current_token updates.
prev_token = current_token
listener = user_stream.new_listener(prev_token)
yield listener.deferred
with PreserveLoggingContext():
yield listener.deferred
except defer.CancelledError:
break
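
wait_for_events above arms a timeout that cancels the listener's deferred, then retries the callback after every wake-up. An asyncio analogue of that loop, swapping Twisted deferreds for futures and asyncio.wait_for for the clock-based timer; check_for_updates and new_listener are illustrative hooks.

import asyncio

async def wait_for_events(check_for_updates, new_listener, timeout):
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    result = None
    while not result:
        result = await check_for_updates()
        if result:
            break
        remaining = deadline - loop.time()
        if remaining <= 0:
            break
        listener = new_listener()  # future resolved on new activity
        try:
            await asyncio.wait_for(listener, remaining)
        except asyncio.TimeoutError:
            break
    return result

if __name__ == "__main__":
    async def demo():
        events, waiters = [], []

        async def check():
            return list(events) or None

        def new_listener():
            fut = asyncio.get_running_loop().create_future()
            waiters.append(fut)
            return fut

        async def producer():
            await asyncio.sleep(0.05)
            events.append("$event1")
            for fut in waiters:
                if not fut.done():
                    fut.set_result(None)

        asyncio.ensure_future(producer())
        print(await wait_for_events(check, new_listener, timeout=1.0))

    asyncio.run(demo())
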
@@ -332,13 +338,18 @@ class Notifier(object):
@defer.inlineCallbacks
def get_events_for(self, user, pagination_config, timeout,
only_room_events=False,
is_guest=False, guest_room_id=None):
only_keys=None,
is_guest=False, explicit_room_id=None):
""" For the given user and rooms, return any new events for them. If
there are no new events wait for up to `timeout` milliseconds for any
new events to happen before returning.
If `only_room_events` is `True` only room events will be returned.
If `only_keys` is not None, events from keys will be sent down.
If explicit_room_id is not set, the user's joined rooms will be polled
for events.
If explicit_room_id is set, that room will be polled for events only if
it is world readable or the user has joined the room.
"""
from_token = pagination_config.from_token
if not from_token:
@@ -346,20 +357,13 @@ class Notifier(object):
limit = pagination_config.limit
room_ids = []
if is_guest:
if guest_room_id:
if not (yield self._is_world_readable(guest_room_id)):
raise AuthError(403, "Guest access not allowed")
room_ids = [guest_room_id]
else:
rooms = yield self.store.get_rooms_for_user(user.to_string())
room_ids = [room.room_id for room in rooms]
room_ids, is_joined = yield self._get_room_ids(user, explicit_room_id)
is_peeking = not is_joined
@defer.inlineCallbacks
def check_for_updates(before_token, after_token):
if not after_token.is_after(before_token):
defer.returnValue(None)
defer.returnValue(EventStreamResult([], (from_token, from_token)))
events = []
end_token = from_token
@@ -370,13 +374,14 @@ class Notifier(object):
after_id = getattr(after_token, keyname)
if before_id == after_id:
continue
if only_room_events and name != "room":
if only_keys and name not in only_keys:
continue
new_events, new_key = yield source.get_new_events(
user=user,
from_key=getattr(from_token, keyname),
limit=limit,
is_guest=is_guest,
is_guest=is_peeking,
room_ids=room_ids,
)
@@ -385,26 +390,50 @@ class Notifier(object):
new_events = yield room_member_handler._filter_events_for_client(
user.to_string(),
new_events,
is_guest=is_guest,
is_peeking=is_peeking,
)
events.extend(new_events)
end_token = end_token.copy_and_replace(keyname, new_key)
if events:
defer.returnValue((events, (from_token, end_token)))
else:
defer.returnValue(None)
defer.returnValue(EventStreamResult(events, (from_token, end_token)))
user_id_for_stream = user.to_string()
if is_peeking:
# Internally, the notifier keeps an event stream per user_id.
# This is used by both /sync and /events.
# We want /events to be used for peeking independently of /sync,
# without polluting its contents. So we invent an illegal user ID
# (which thus cannot clash with any real users) for keying peeking
# over /events.
#
# I am sorry for what I have done.
user_id_for_stream = "_PEEKING_%s_%s" % (
explicit_room_id, user_id_for_stream
)
result = yield self.wait_for_events(
user, timeout, check_for_updates, room_ids=room_ids, from_token=from_token
user_id_for_stream,
timeout,
check_for_updates,
room_ids=room_ids,
from_token=from_token,
)
if result is None:
result = ([], (from_token, from_token))
defer.returnValue(result)
@defer.inlineCallbacks
def _get_room_ids(self, user, explicit_room_id):
joined_rooms = yield self.store.get_rooms_for_user(user.to_string())
joined_room_ids = map(lambda r: r.room_id, joined_rooms)
if explicit_room_id:
if explicit_room_id in joined_room_ids:
defer.returnValue(([explicit_room_id], True))
if (yield self._is_world_readable(explicit_room_id)):
defer.returnValue(([explicit_room_id], False))
raise AuthError(403, "Non-joined access not allowed")
defer.returnValue((joined_room_ids, True))
@defer.inlineCallbacks
def _is_world_readable(self, room_id):
state = yield self.hs.get_state_handler().get_current_state(
@@ -432,7 +461,7 @@ class Notifier(object):
@log_function
def _register_with_keys(self, user_stream):
self.user_to_user_stream[user_stream.user] = user_stream
self.user_to_user_stream[user_stream.user_id] = user_stream
for room in user_stream.rooms:
s = self.room_to_user_streams.setdefault(room, set())
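
The long comment in get_events_for explains why peeking /events connections get their own stream key: a synthetic, deliberately illegal user ID that can never collide with a real user's /sync stream. A tiny sketch of just that keying decision, using the same "_PEEKING_" format as the diff:

def stream_key_for(user_id, is_peeking, explicit_room_id):
    if is_peeking:
        # The leading underscore makes this an invalid Matrix user ID,
        # so it cannot clash with any real user's stream.
        return "_PEEKING_%s_%s" % (explicit_room_id, user_id)
    return user_id

if __name__ == "__main__":
    print(stream_key_for("@alice:example.org", False, None))
    print(stream_key_for("@guest:example.org", True, "!room:example.org"))
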

Some files were not shown because too many files have changed in this diff.