Compare commits

...

1445 Commits

Author SHA1 Message Date
Erik Johnston
118bb5c036 Cheekily use RUSAGE_THREAD 2015-08-19 10:17:33 +01:00
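A minimal sketch of the per-thread CPU timing trick this commit and the one below it rely on (hedged: Python 2's resource module does not expose RUSAGE_THREAD, so the raw Linux constant is supplied by hand, which is presumably the "cheeky" part; the helper name is invented):

```python
import resource

# RUSAGE_THREAD is Linux-only and absent from Python 2's resource
# module, so the raw constant (1) is supplied by hand.
RUSAGE_THREAD = 1

def thread_cpu_seconds():
    """User + system CPU time consumed by the calling thread only."""
    usage = resource.getrusage(RUSAGE_THREAD)
    return usage.ru_utime + usage.ru_stime
```

Sampling this before and after a block of work gives the CPU time attributable to whichever log context owned the thread in between.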
Erik Johnston
43349a7b43 Time CPU time spent in each log context 2015-08-19 10:17:33 +01:00
Erik Johnston
40da1f200d Remove an access token log line 2015-08-19 09:41:07 +01:00
Erik Johnston
abc6986a24 Fix regression where we incorrectly responded with a 200 to /login 2015-08-19 09:31:11 +01:00
Erik Johnston
e624cdec64 Merge pull request #228 from matrix-org/erikj/_get_state_for_groups
Ensure we never return a None event from _get_state_for_groups
2015-08-18 16:30:17 +01:00
Erik Johnston
c3dd2ecd5e Merge pull request #230 from matrix-org/erikj/appservice_auth_entity
Set request.authenticated_entity for application services
2015-08-18 16:30:11 +01:00
Erik Johnston
38a965b816 Merge pull request #227 from matrix-org/erikj/receipts_take2
Re-enable receipts API.
2015-08-18 16:30:04 +01:00
Erik Johnston
a82938416d Remove newline because vertical whitespace makes mjark sad 2015-08-18 16:28:13 +01:00
Erik Johnston
0bfdaf1f4f Rejig the code to make it nicer 2015-08-18 16:26:07 +01:00
Erik Johnston
a5cbd20001 Merge pull request #225 from matrix-org/erikj/reactor_metrics
Fix pending_calls metric to not lie
2015-08-18 16:21:11 +01:00
Erik Johnston
128ed32e6b Bump size of get_presence_state cache 2015-08-18 15:51:23 +01:00
Erik Johnston
ee59af9ac0 Set request.authenticated_entity for application services 2015-08-18 15:17:47 +01:00
Erik Johnston
f704c10f29 Rename unhelpful variable name 2015-08-18 11:54:03 +01:00
Erik Johnston
6e7d36a72c Also check for presence of 'threadCallQueue' in reactor 2015-08-18 11:51:08 +01:00
Erik Johnston
d3da63f766 Use more helpful variable names 2015-08-18 11:47:00 +01:00
Erik Johnston
8199475ce0 Ensure we never return a None event from _get_state_for_groups 2015-08-18 11:44:10 +01:00
Erik Johnston
0d4abf7777 Typo 2015-08-18 11:19:08 +01:00
Erik Johnston
e55291ce5e None check 2015-08-18 11:17:37 +01:00
Erik Johnston
8e254862f4 Don't assume @cachedList function returns keys for everything 2015-08-18 11:11:33 +01:00
Erik Johnston
85d0bc3bdc Reduce cache size from obscenely large to quite large 2015-08-18 11:00:38 +01:00
Erik Johnston
cfc503681f Comments 2015-08-18 10:49:23 +01:00
Erik Johnston
dc2a105fca Merge pull request #226 from matrix-org/erikj/room_presence
Add and use cached batched storage.get_state function.
2015-08-18 10:43:50 +01:00
Erik Johnston
83eb627b5a More helpful variable names 2015-08-18 10:33:11 +01:00
Erik Johnston
776ee6d92b Doc strings 2015-08-18 10:30:07 +01:00
Erik Johnston
f72ed6c6a3 Remove debug try/catch 2015-08-18 10:29:49 +01:00
Mark Haines
8899df13bf Merge pull request #208 from matrix-org/markjh/end-to-end-key-federation
Federation for end-to-end key requests.
2015-08-18 09:12:54 +01:00
Erik Johnston
8f4165628b Add index receipts_linearized_room_stream 2015-08-17 14:43:54 +01:00
Erik Johnston
d3d582bc1c Remove unused import 2015-08-17 13:38:09 +01:00
Erik Johnston
4d8e1e1f9e Remove added unused methods 2015-08-17 13:36:07 +01:00
Erik Johnston
afef6f5d16 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/receipts_take2 2015-08-17 13:23:44 +01:00
Erik Johnston
2d97e65558 Remember to invalidate caches 2015-08-17 10:46:55 +01:00
Erik Johnston
1a9510bb84 Implement a batched presence_handler.get_state and use it 2015-08-17 10:40:23 +01:00
Erik Johnston
47abebfd6d Add batched version of store.get_presence_state 2015-08-17 09:50:50 +01:00
Erik Johnston
f9d4da7f45 Fix bug where we were leaking None into state event lists 2015-08-17 09:39:45 +01:00
Daniel Wagner-Hall
30883d8409 Merge pull request #221 from matrix-org/auth
Simplify LoginHandler and AuthHandler
2015-08-14 17:02:22 +01:00
Erik Johnston
891dfd90bd Fix pending_calls metric to not lie 2015-08-14 15:43:11 +01:00
Erik Johnston
68b255c5a1 Batch _get_linearized_receipts_for_rooms 2015-08-14 15:06:22 +01:00
Erik Johnston
129ee4e149 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/receipts_take2 2015-08-13 17:28:43 +01:00
Mark Haines
c5966b2a97 Merge remote-tracking branch 'origin/develop' into markjh/end-to-end-key-federation 2015-08-13 17:27:53 +01:00
Mark Haines
0cceb2ac92 Add a few strategic new lines to break up the on_query_client_keys and on_claim_client_keys methods in federation_server.py 2015-08-13 17:27:46 +01:00
Erik Johnston
d6bcc68ea7 Merge pull request #219 from matrix-org/erikj/dictionary_cache
Dictionary and list caches
2015-08-13 17:27:08 +01:00
Erik Johnston
9f7f228ec2 Remove pointless map 2015-08-13 17:20:59 +01:00
Erik Johnston
3d77e56c12 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-13 17:18:52 +01:00
Erik Johnston
d884047d34 Merge pull request #224 from matrix-org/erikj/reactor_metrics
Add some metrics about the reactor
2015-08-13 17:17:07 +01:00
Erik Johnston
2bb2c02571 Remove some vertical space 2015-08-13 17:11:30 +01:00
Mark Haines
3d1cdda762 Merge branch 'develop' into erikj/reactor_metrics 2015-08-13 17:03:58 +01:00
Erik Johnston
57877b01d7 Replace list comprehension 2015-08-13 17:00:17 +01:00
Erik Johnston
5db5677969 Add metrics to the receipts cache 2015-08-13 16:58:23 +01:00
Erik Johnston
7e77a82c5f Re-enable receipts 2015-08-13 16:58:10 +01:00
Mark Haines
7eb4d626ba Add max-line-length to the flake8 section of setup.cfg 2015-08-13 13:12:33 +01:00
Erik Johnston
06750140f6 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-13 11:55:20 +01:00
Erik Johnston
adbd720fab PEP8 2015-08-13 11:47:38 +01:00
Erik Johnston
8b7ce2945b Merge branch 'erikj/reactor_metrics' into erikj/dictionary_cache 2015-08-13 11:42:22 +01:00
Erik Johnston
a6c27de1aa Don't time getDelayedCalls 2015-08-13 11:41:57 +01:00
Erik Johnston
c044aca1fd Merge branch 'erikj/reactor_metrics' into erikj/dictionary_cache 2015-08-13 11:39:38 +01:00
Erik Johnston
ba5d34a832 Add some metrics about the reactor 2015-08-13 11:38:59 +01:00
Mark Haines
6a191d62ed Merge pull request #173 from matrix-org/markjh/twisted-15
Update to Twisted-15.2.1.
2015-08-12 17:28:47 +01:00
Mark Haines
21ac8be5f7 Depend on Twisted>=15.1 rather than pinning to a particular version 2015-08-12 17:25:13 +01:00
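The shape of that dependency change, as a hedged sketch (the exact previously-pinned version is an assumption):

```python
# setup.py-style sketch: a lower bound rather than a hard pin
install_requires = [
    "Twisted>=15.1",  # was an exact pin, e.g. "Twisted==15.1.0"
]
```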
Erik Johnston
3e4e367f09 Merge pull request #223 from matrix-org/markjh/enable_demo_registration
enable registration in the demo servers
2015-08-12 17:24:16 +01:00
Erik Johnston
0fbed2a8fa Comment 2015-08-12 17:22:54 +01:00
Mark Haines
998a72d4d9 Merge branch 'develop' into markjh/twisted-15
Conflicts:
	synapse/http/matrixfederationclient.py
2015-08-12 17:21:14 +01:00
Erik Johnston
c10ac7806e Explain why we're prefilling dict with Nones 2015-08-12 17:16:30 +01:00
Erik Johnston
101ee3fd00 Better variable name 2015-08-12 17:08:05 +01:00
Erik Johnston
df361d08f7 Split _get_state_for_group_from_cache into two 2015-08-12 17:06:21 +01:00
Erik Johnston
7b0e797080 Fix _filter_events_for_client 2015-08-12 17:05:24 +01:00
Mark Haines
2eb91e6694 enable registration in the demo servers 2015-08-12 16:53:30 +01:00
Erik Johnston
cfa62007a3 Docstring 2015-08-12 16:42:46 +01:00
Daniel Wagner-Hall
5ce903e2f7 Merge password checking implementations 2015-08-12 16:09:19 +01:00
Erik Johnston
a7eeb34c64 Simplify staggered deferred lists 2015-08-12 16:02:05 +01:00
Erik Johnston
f7e2f981ea Use list comprehension instead of filter 2015-08-12 16:01:10 +01:00
Daniel Wagner-Hall
bcc1d34d35 Merge branch 'develop' into auth 2015-08-12 15:58:52 +01:00
Daniel Wagner-Hall
f4122c64b5 Merge branch 'develop' of github.com:matrix-org/synapse into develop 2015-08-12 15:58:35 +01:00
Daniel Wagner-Hall
415c2f0549 Simplify LoginHandler and AuthHandler
* Merge LoginHandler -> AuthHandler
* Add a bunch of documentation
* Improve some naming
* Remove unused branches

I will start merging the actual logic of the two handlers shortly
2015-08-12 15:49:37 +01:00
Erik Johnston
d46208c12c Merge branch 'develop' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-12 14:28:43 +01:00
Erik Johnston
4f11a5b2b5 Merge pull request #220 from matrix-org/markjh/generate_keys
Fix the --generate-keys option.
2015-08-12 14:23:54 +01:00
Mark Haines
7bbaab9432 Fix the --generate-keys option. Make it do the same thing as --generate-config does when the config file exists, but without printing a warning 2015-08-12 11:57:37 +01:00
Daniel Wagner-Hall
7b49236b37 Merge pull request #216 from matrix-org/auth
Clean up some docs and redundant fluff
2015-08-12 10:55:42 +01:00
Erik Johnston
d7451e0f22 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-12 10:30:30 +01:00
Erik Johnston
4807616e16 Wire up the dictionarycache to the metrics 2015-08-12 10:13:35 +01:00
Mark Haines
275f7c987c Merge pull request #182 from matrix-org/manu/fix_no_rate_limit_in_federation_demo
Federation demo start.sh: Fixed --no-rate-limit param in the script
2015-08-12 09:39:57 +01:00
Mark Haines
b24d7ebd6e Fix the cleanup script to use the right $DIR 2015-08-12 09:39:07 +01:00
Erik Johnston
2df8dd9b37 Move all the caches into their own package, synapse.util.caches 2015-08-11 18:00:59 +01:00
Daniel Wagner-Hall
a23a760b3f Merge branch 'develop' into auth 2015-08-11 17:41:29 +01:00
Erik Johnston
7568fe880d Merge pull request #218 from matrix-org/mockfix
Remove call to "recently" removed function in mock
2015-08-11 17:00:17 +01:00
Daniel Wagner-Hall
4ff0228c25 Remove call to recently removed function in mock 2015-08-11 16:56:30 +01:00
Daniel Wagner-Hall
dcd5983fe4 Remove call to recently removed function in mock 2015-08-11 16:54:06 +01:00
Daniel Wagner-Hall
45610305ea Add missing space because linter 2015-08-11 16:43:27 +01:00
Daniel Wagner-Hall
88e03da39f Minor docs cleanup 2015-08-11 16:35:28 +01:00
Daniel Wagner-Hall
9dba813234 Remove redundant if-guard
The startswith("@") does the job
2015-08-11 16:34:17 +01:00
Erik Johnston
53a817518b Comments 2015-08-11 11:40:40 +01:00
Erik Johnston
6eaa116867 Comment 2015-08-11 11:35:24 +01:00
Erik Johnston
4762c276cb Docs 2015-08-11 11:33:41 +01:00
Erik Johnston
dc8399ee00 Remove debug loggers 2015-08-11 11:30:59 +01:00
Erik Johnston
1b994a97dd Fix application of ACLs 2015-08-11 10:41:40 +01:00
Erik Johnston
10b874067b Fix state cache 2015-08-11 09:12:41 +01:00
Erik Johnston
017b798e4f Clean up StateStore 2015-08-10 15:01:06 +01:00
Erik Johnston
2c019eea11 Remove unused function 2015-08-10 14:44:41 +01:00
Erik Johnston
bb0a475c30 Comments 2015-08-10 14:27:38 +01:00
Erik Johnston
dcefac3b06 Comments 2015-08-10 14:16:24 +01:00
Mark Haines
559c51debc Use TypeError instead of ValueError and give a nicer error message
when someone calls Cache.invalidate with the wrong type.
2015-08-10 14:07:17 +01:00
Erik Johnston
6f274f7e13 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-10 13:53:09 +01:00
Erik Johnston
7ce71f2ffc Merge branch 'erikj/cache_varargs_interface' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-10 13:47:51 +01:00
Erik Johnston
8c3a62b5c7 Merge pull request #215 from matrix-org/erikj/cache_varargs_interface
Change Cache to not use *args in its interface
2015-08-10 13:47:45 +01:00
Erik Johnston
86eaaa885b Rename keyargs to args in CacheDescriptor 2015-08-10 13:44:44 +01:00
Erik Johnston
e0b6e49466 Merge branch 'erikj/cache_varargs_interface' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-10 10:39:22 +01:00
Erik Johnston
2cd6cb9f65 Rename keyargs to args in Cache 2015-08-10 10:38:47 +01:00
Erik Johnston
aa88582e00 Do bounds check 2015-08-10 10:08:15 +01:00
Erik Johnston
5119e416e8 Line length 2015-08-10 10:05:30 +01:00
Erik Johnston
8f04b6fa7a Merge branch 'erikj/cache_varargs_interface' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-07 19:30:25 +01:00
Erik Johnston
7dec0b2bee Merge branch 'develop' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-07 19:28:39 +01:00
Erik Johnston
06218ab125 Merge pull request #212 from matrix-org/erikj/cache_deferreds
Make CacheDescriptor cache deferreds rather than the deferreds' values
2015-08-07 19:28:05 +01:00
Erik Johnston
2352974aab Merge branch 'erikj/cache_deferreds' of github.com:matrix-org/synapse into erikj/cache_varargs_interface 2015-08-07 19:26:54 +01:00
Erik Johnston
9c5385b53a s/observed/observer/ 2015-08-07 19:26:38 +01:00
Erik Johnston
ffab798a38 Merge branch 'erikj/cache_deferreds' of github.com:matrix-org/synapse into erikj/cache_varargs_interface 2015-08-07 19:18:47 +01:00
Erik Johnston
62126c996c Propagate stale cache errors to calling functions 2015-08-07 19:17:58 +01:00
Erik Johnston
3213ff630c Remove unnecessary cache 2015-08-07 19:14:05 +01:00
Erik Johnston
20addfa358 Change Cache to not use *args in its interface 2015-08-07 18:32:47 +01:00
Erik Johnston
9eb5b23d3a Batch up various DB requests for event -> state 2015-08-07 18:16:02 +01:00
Erik Johnston
0211890134 Implement a CacheListDescriptor 2015-08-07 18:14:49 +01:00
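A rough sketch of the batched-lookup idea behind a CacheListDescriptor (the fetch_batch callable is hypothetical): serve hits from the per-key cache and fetch only the misses in one query.

```python
from twisted.internet import defer

_cache = {}

@defer.inlineCallbacks
def get_many(keys, fetch_batch):
    # fetch_batch: list of keys -> Deferred firing with a dict of results.
    results = {k: _cache[k] for k in keys if k in _cache}
    missing = [k for k in keys if k not in results]
    if missing:
        fetched = yield fetch_batch(missing)  # one batched query for all misses
        _cache.update(fetched)
        # The batch function may omit keys it knows nothing about
        # (cf. 8e254862f4 above), so fill the gaps explicitly.
        for k in missing:
            results[k] = fetched.get(k)
    defer.returnValue(results)
```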
Erik Johnston
ffdb8c3828 Don't be too enthusiastic with defer.gatherResults 2015-08-07 18:13:48 +01:00
Paul Evans
e69b669083 Merge pull request #213 from matrix-org/paul/SYN-420
Three small improvements to help debian package (SYN-420)
2015-08-07 17:49:54 +01:00
Paul "LeoNerd" Evans
0db40d3e93 Don't complain about extra .pyc files we find while hunting for database schemas 2015-08-07 17:22:11 +01:00
Paul "LeoNerd" Evans
e3c8e2c13c Add a --generate-keys option 2015-08-07 16:42:27 +01:00
Paul "LeoNerd" Evans
efe60d5e8c Only print the pidfile path on startup if requested by a commandline flag 2015-08-07 16:36:42 +01:00
Erik Johnston
b2c7bd4b09 Cache get_recent_events_for_room 2015-08-07 14:42:34 +01:00
Erik Johnston
b3768ec10a Remove unnecessary cache 2015-08-07 13:41:05 +01:00
Erik Johnston
b8e386db59 Change Cache to not use *args in its interface 2015-08-07 11:52:21 +01:00
Erik Johnston
fe994e728f Store absence of state in cache 2015-08-07 10:17:38 +01:00
Erik Johnston
1d08bf7c17 Merge branch 'erikj/cache_deferreds' into erikj/dictionary_cache 2015-08-06 14:03:15 +01:00
Erik Johnston
63b1eaf32c Docs 2015-08-06 14:02:50 +01:00
Erik Johnston
b811c98574 Remove failed deferreds from cache 2015-08-06 14:01:27 +01:00
Erik Johnston
433314cc34 Re-implement DEBUG_CACHES flag 2015-08-06 14:01:05 +01:00
Erik Johnston
8049c9a71e Merge pull request #209 from matrix-org/erikj/cached_keyword_args
Add support for using keyword arguments with cached functions
2015-08-06 13:52:49 +01:00
Erik Johnston
f596ff402e Merge branch 'erikj/cache_deferreds' into erikj/dictionary_cache 2015-08-06 13:37:56 +01:00
Erik Johnston
2efb93af52 Merge branch 'erikj/cached_keyword_args' into erikj/cache_deferreds 2015-08-06 13:35:28 +01:00
Erik Johnston
953dbd28a7 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/cached_keyword_args 2015-08-06 13:35:03 +01:00
Erik Johnston
7eea3e356f Make @cached cache deferreds rather than the deferreds' values 2015-08-06 13:33:34 +01:00
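The core of the change, sketched and hedged (Synapse wraps this in an ObservableDeferred; observe() below is an illustration, not the real API): caching the Deferred itself means concurrent callers for the same key share one in-flight database hit instead of each issuing their own.

```python
from twisted.internet import defer

_cache = {}

def observe(cached):
    # Hand each caller its own Deferred so one caller's callbacks can't
    # consume the result the cache is still holding.
    out = defer.Deferred()

    def on_result(res):
        out.callback(res)
        return res  # keep the result in the cached Deferred's chain

    def on_error(failure):
        out.errback(failure)
        return failure

    cached.addCallbacks(on_result, on_error)
    return out

def cached_fetch(key, fetch):
    # fetch: a no-argument callable returning a Deferred.
    if key not in _cache:
        _cache[key] = fetch()
    return observe(_cache[key])
```

Note the follow-up commit b811c98574 above, which removes failed deferreds from the cache so errors aren't replayed forever.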
Erik Johnston
3e1b77efc2 Merge branch 'erikj/cached_keyword_args' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-05 16:45:56 +01:00
Erik Johnston
b52b4a84ec Merge branch 'develop' of github.com:matrix-org/synapse into erikj/dictionary_cache 2015-08-05 15:41:20 +01:00
Erik Johnston
1e62a3d3a9 Up the cache size for 'get_joined_hosts_for_room' and 'get_users_in_room' 2015-08-05 15:40:40 +01:00
Erik Johnston
a89559d797 Use LRU cache by default 2015-08-05 15:39:47 +01:00
Erik Johnston
07507643cb Use dictionary cache to do group -> state fetching 2015-08-05 15:11:42 +01:00
Erik Johnston
c67ba143fa Move DictionaryCache 2015-08-04 15:58:28 +01:00
Erik Johnston
e7768e77f5 Add basic dictionary cache 2015-08-04 15:56:56 +01:00
Erik Johnston
2e35a733cc Merge branch 'develop' of github.com:matrix-org/synapse into erikj/acl_perf 2015-08-04 13:00:52 +01:00
Erik Johnston
413a4c289b Add comment 2015-08-04 11:08:07 +01:00
Erik Johnston
4d6cb8814e Speed up event filtering (for ACL) logic 2015-08-04 09:32:23 +01:00
David Baker
7148aaf5d0 Don't try & check the username if we don't have one (which we won't if it's been saved in the auth layer) 2015-08-03 17:03:27 +01:00
David Baker
28d07a02e4 Add vector.im as trusted ID server 2015-08-03 15:31:21 +01:00
David Baker
2c963054f8 Merge pull request #210 from matrix-org/reg-v2a-password-skip
v2_alpha /register fixes for Application Services
2015-07-29 14:46:17 +01:00
Kegan Dougal
11b0a34074 Use the same reg paths as register v1 for ASes.
Namely this means using registration_handler.appservice_register.
2015-07-29 10:00:54 +01:00
Matthew Hodgson
c772dffc9f improve OS X instructions and remove all the leading $'s to make it easier to c+p commands 2015-07-29 09:39:55 +01:00
Kegan Dougal
a4d62ba36a Fix v2_alpha registration. Add unit tests.
V2 Registration forced everyone (including ASes) to create a password for a
user, when ASes should be able to omit passwords. Also unbreak AS registration
in general which checked too early if the given username was claimed by an AS;
it was checked before knowing if the AS was the one doing the registration! Add
unit tests for AS reg, user reg and disabled_registration flag.
2015-07-28 17:34:12 +01:00
Erik Johnston
c472d6107e Merge pull request #207 from matrix-org/erikj/generate_local_thumbnails_on_thread
Generate local thumbnails on a thread
2015-07-27 14:57:25 +01:00
Erik Johnston
39e21ea51c Add support for using keyword arguments with cached functions 2015-07-27 13:57:29 +01:00
Mark Haines
2da3b1e60b Get the end-to-end key federation working 2015-07-24 18:26:46 +01:00
Mark Haines
62c010283d Add federation support for end-to-end key requests 2015-07-23 16:03:38 +01:00
Erik Johnston
2b4f47db9c Generate local thumbnails on a thread 2015-07-23 14:52:29 +01:00
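A minimal sketch of moving thumbnailing off the reactor thread (scale_image is a stand-in for the real resizing code):

```python
from twisted.internet.threads import deferToThread

def scale_image(image_bytes, width, height):
    # Stand-in for the real, CPU-bound image resizing.
    return image_bytes

def generate_thumbnail(image_bytes, width, height):
    # Run the scaling on the reactor's threadpool so the event loop stays
    # free to serve other requests; callers get back a Deferred.
    return deferToThread(scale_image, image_bytes, width, height)
```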
Mark Haines
6886bba988 Merge pull request #205 from matrix-org/erikj/pick_largest_thumbnail
Pick larger than desired thumbnail for 'crop'
2015-07-23 11:16:02 +01:00
Erik Johnston
103e1c2431 Pick larger than desired thumbnail for 'crop' 2015-07-23 11:12:49 +01:00
Matrix
4e2e67fd50 Disable receipts for now 2015-07-22 16:13:46 +01:00
David Baker
a56eccbbfc Query for all the ones we were asked about, not just the last... 2015-07-21 16:38:16 -07:00
David Baker
20c0324e9c Doesn't seem to make any difference: guess it does work with the object reference 2015-07-21 16:21:37 -07:00
David Baker
cf7a40b08a I think this was what was intended... 2015-07-21 16:08:00 -07:00
Erik Johnston
90dbd71c13 Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-07-21 09:25:30 +01:00
Mark Haines
3b5823c74d s/take/claim/ for end to end key APIs 2015-07-20 18:23:54 +01:00
Erik Johnston
bde97b988a Merge pull request #204 from illicitonion/develop
Improve naming
2015-07-20 14:41:14 +01:00
Daniel Wagner-Hall
53d1174aa9 Improve naming 2015-07-20 06:32:12 -07:00
Kegan Dougal
ddef5ea126 Remove semicolon. 2015-07-20 14:02:36 +01:00
Kegan Dougal
b6ee0585bd Parse the ID given to /invite|ban|kick to make sure it looks like a user ID. 2015-07-20 13:55:19 +01:00
Matrix
4f973eb657 Up default cache size for _RoomStreamChangeCache 2015-07-18 19:07:33 +01:00
Matrix
4cab2cfa34 Don't do any database hits in receipt handling if from_key == to_key 2015-07-18 19:07:12 +01:00
Erik Johnston
b6d4a4c6d8 Merge pull request #199 from matrix-org/erikj/receipts
Implement read receipts.
2015-07-16 18:18:36 +01:00
Erik Johnston
d155b318d2 Merge pull request #203 from matrix-org/erikj/room_creation_presets
Implement presets at room creation
2015-07-16 18:18:11 +01:00
Erik Johnston
a2ed7f437c Merge pull request #202 from matrix-org/erikj/power_level_sanity
Change power level semantics.
2015-07-16 18:18:04 +01:00
Erik Johnston
c456d17daf Implement specifying custom initial state for /createRoom 2015-07-16 15:25:29 +01:00
David Baker
09489499e7 pep8 + debug line 2015-07-15 19:39:18 +01:00
David Baker
4da05fa0ae Add back in support for remembering parameters submitted to a user-interactive auth call. 2015-07-15 19:28:57 +01:00
Matthew Hodgson
8cedf3ce95 bump up image quality a bit more as it looks crap 2015-07-14 23:53:13 +01:00
Erik Johnston
baa55fb69e Merge pull request #193 from matrix-org/erikj/bulk_persist_event
Add bulk insert events API
2015-07-14 10:49:24 +01:00
Erik Johnston
002a44ac1a s/everyone_ops/original_invitees_have_ops/ 2015-07-14 10:37:42 +01:00
David Baker
62b4b72fe4 Close, but no cigar. 2015-07-14 10:33:25 +01:00
Erik Johnston
b49a30a972 Capitalize constants 2015-07-14 10:20:31 +01:00
Erik Johnston
4624d6035e Docs 2015-07-14 10:19:07 +01:00
Erik Johnston
d5cc794598 Implement presets at room creation 2015-07-13 16:56:08 +01:00
Erik Johnston
5989637f37 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/receipts 2015-07-13 13:50:57 +01:00
Erik Johnston
016c089f13 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/power_level_sanity 2015-07-13 13:48:13 +01:00
Erik Johnston
e5991af629 Comments 2015-07-13 13:30:43 +01:00
Erik Johnston
17bb9a7eb9 Remove commented out code 2015-07-10 14:07:57 +01:00
Erik Johnston
a5ea22d468 Sanitize power level checks 2015-07-10 14:05:38 +01:00
Erik Johnston
7e3b14fe78 You shouldn't be able to ban/kick users with higher power levels 2015-07-10 14:05:38 +01:00
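The semantics these two commits enforce, sketched (function and argument names are illustrative):

```python
def can_kick_or_ban(sender_level, target_level, required_level):
    # The sender must clear the room's kick/ban threshold AND strictly
    # outrank the target: equals cannot remove equals, and juniors
    # cannot remove seniors.
    return sender_level >= required_level and sender_level > target_level
```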
Erik Johnston
532fcc997a Merge pull request #196 from matrix-org/erikj/room_history
Add ability to restrict room history.
2015-07-10 13:47:04 +01:00
Erik Johnston
0b3389bcd2 Merge pull request #194 from matrix-org/erikj/bulk_verify_sigs
Implement bulk verify_signed_json API
2015-07-10 13:46:53 +01:00
Erik Johnston
0d7f0febf4 Uniquely name unique constraint 2015-07-10 13:43:03 +01:00
Erik Johnston
b7cb37b189 Merge pull request #198 from matrix-org/markjh/client-end-to-end-key-management
Client end to end key management API
2015-07-10 13:36:17 +01:00
Mark Haines
a01097d60b Assume that each device for a user has only one of each type of key 2015-07-10 13:26:18 +01:00
Erik Johnston
f3049d0b81 Small tweaks to SAML2 configuration.
- Add saml2 config docs to default config.
- Use existence of saml2 config to indicate if saml2 should be enabled.
2015-07-10 10:50:14 +01:00
Erik Johnston
a887efa07a Add Muthu Subramanian to AUTHORS 2015-07-10 10:37:24 +01:00
Erik Johnston
9158ad1abb Merge pull request #201 from EricssonResearch/msba/saml2-develop
Integrate SAML2 basic authentication - uses pysaml2
2015-07-10 10:25:56 +01:00
Erik Johnston
b5f0d73ea3 Add comment 2015-07-09 17:09:26 +01:00
Erik Johnston
ed88720952 Handle error slightly better 2015-07-09 16:14:46 +01:00
Erik Johnston
f0979afdb0 Remove spurious comment 2015-07-09 16:02:07 +01:00
Mark Haines
bf0d59ed30 Don't bother with a timeout for one time keys on the server. 2015-07-09 14:04:03 +01:00
Erik Johnston
c2d08ca62a Integer timestamps 2015-07-09 13:15:34 +01:00
Erik Johnston
4019b48aaa Merge branch 'develop' of github.com:matrix-org/synapse into erikj/receipts 2015-07-09 11:55:52 +01:00
Erik Johnston
294dbd712f We don't want semicolons. 2015-07-09 11:47:24 +01:00
Erik Johnston
1af188209a Change format of receipts to allow inclusion of data 2015-07-09 11:39:30 +01:00
Muthu Subramanian
8cd34dfe95 Make SAML2 optional and add some references/comments 2015-07-09 13:34:47 +05:30
Muthu Subramanian
d2caa5351a code beautify 2015-07-09 12:58:15 +05:30
Matthew Hodgson
fb8d2862c1 remove the tls_certificate_chain_path param and simply support tls_certificate_path pointing to a file containing a chain of certificates 2015-07-09 00:45:41 +01:00
Matthew Hodgson
8ad2d2d1cb document tls_certificate_chain_path more clearly 2015-07-09 00:06:01 +01:00
Matthew Hodgson
f26a3df1bf oops, context.tls_certificate_chain_file() expects a file, not a certificate. 2015-07-08 21:33:02 +01:00
Matthew Hodgson
19fa3731ae typo 2015-07-08 18:53:41 +01:00
Matthew Hodgson
465acb0c6a *cough* 2015-07-08 18:30:59 +01:00
Matthew Hodgson
64afbe6ccd add new optional config for tls_certificate_chain_path for folks with intermediary SSL certs 2015-07-08 18:20:02 +01:00
Matthew Hodgson
04192ee05b typo 2015-07-08 17:49:15 +01:00
Mark Haines
8fb79eeea4 Only remove one time keys when new one time keys are added 2015-07-08 17:04:29 +01:00
Erik Johnston
ce9e2f84ad Add blist to dependencies 2015-07-08 15:41:59 +01:00
Erik Johnston
304343f4d7 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/receipts 2015-07-08 15:37:33 +01:00
Erik Johnston
af812b68dd Add a cache to fetching of receipt streams 2015-07-08 15:35:00 +01:00
Erik Johnston
d85ce8d89b Split receipt events up into one per room 2015-07-08 11:36:05 +01:00
Muthu Subramanian
f53bae0c19 code beautify 2015-07-08 16:05:46 +05:30
Muthu Subramanian
77c5db5977 code beautify 2015-07-08 16:05:20 +05:30
Muthu Subramanian
81682d0f82 Integrate SAML2 basic authentication - uses pysaml2 2015-07-08 15:36:54 +05:30
Erik Johnston
87311d1b8c Hook up receipts to v1 initialSync 2015-07-08 11:02:04 +01:00
Erik Johnston
f0dd6d4cbd Fix test. 2015-07-07 16:18:36 +01:00
Erik Johnston
ca041d5526 Wire together receipts and the notifer/federation 2015-07-07 15:25:30 +01:00
Erik Johnston
716e426933 Fix various typos 2015-07-07 10:55:31 +01:00
Erik Johnston
e8b2f6f8a1 Add a ReceiptServlet 2015-07-07 10:55:22 +01:00
Erik Johnston
dfc74c30c9 Merge pull request #197 from matrix-org/mjark/missing_regex_group
Don't 500 if a group is missing from the regex match
2015-07-07 09:37:38 +01:00
Mark Haines
28ef344077 Merge branch 'mjark/missing_regex_group' into markjh/client-end-to-end-key-management 2015-07-06 18:48:27 +01:00
Mark Haines
2ef182ee93 Add client API for uploading and querying keys for end to end encryption 2015-07-06 18:47:57 +01:00
Mark Haines
b5770f8947 Add store for client end to end keys 2015-07-06 18:46:47 +01:00
Mark Haines
a7dcbfe430 Don't 500 if a group is missing from the regex 2015-07-06 16:47:17 +01:00
Erik Johnston
1a3255b507 Add m.room.history_visibility to newly created rooms' m.room.power_levels 2015-07-06 13:25:35 +01:00
Erik Johnston
fb47c3cfbe Rename key and values for m.room.history_visibility. Support 'invited' value 2015-07-06 13:05:52 +01:00
Erik Johnston
65e69dec8b Don't explode if we don't recognize one of the event_ids in the backfill request 2015-07-06 09:33:03 +01:00
Erik Johnston
c3e2600c67 Filter and redact events that the other server doesn't have permission to see during backfill 2015-07-03 17:52:57 +01:00
Erik Johnston
400894616d Respect m.room.history_visibility in v2_alpha sync API 2015-07-03 14:51:01 +01:00
Erik Johnston
c0a975cc2e Merge pull request #195 from matrix-org/erikj/content_disposition
Add Content-Disposition headers to media repo downloads
2015-07-03 11:26:06 +01:00
Erik Johnston
12b83f1a0d If user supplies filename in URL when downloading from media repo, use that name in Content Disposition 2015-07-03 11:24:55 +01:00
Erik Johnston
00ab882ed6 Add m.room.history_visibility to list of auth events 2015-07-03 10:31:24 +01:00
Erik Johnston
41938afed8 Make v1 initial syncs respect room history ACL 2015-07-02 17:12:35 +01:00
Erik Johnston
1a60545626 Add basic impl for room history ACL on GET /messages client API 2015-07-02 16:20:10 +01:00
Erik Johnston
ac78e60de6 Add stream_id index 2015-07-02 13:18:41 +01:00
Erik Johnston
bd1236c0ee Consolidate duplicate code in notifier 2015-07-02 11:46:05 +01:00
Erik Johnston
ddf7979531 Add receipts_key to StreamToken 2015-07-02 11:45:44 +01:00
Erik Johnston
0862fed2a8 Add basic ReceiptHandler 2015-07-01 17:19:31 +01:00
Erik Johnston
80a61330ee Add basic storage functions for handling of receipts 2015-07-01 17:19:12 +01:00
Erik Johnston
67362a9a03 Merge branch 'release-v0.9.3' of github.com:matrix-org/synapse 2015-07-01 15:12:57 +01:00
Erik Johnston
480d720388 Bump changelog and version to v0.9.3 2015-07-01 15:12:32 +01:00
Erik Johnston
901f56fa63 Add tables for receipts 2015-06-30 15:29:47 +01:00
Erik Johnston
9beaedd164 Enforce ascii filenames for uploads 2015-06-30 10:31:59 +01:00
Erik Johnston
2124f668db Add Content-Disposition headers to media repo v1 downloads 2015-06-30 09:35:44 +01:00
Matthew Hodgson
11374a77ef clarify readme a bit more 2015-06-27 16:59:47 +02:00
Erik Johnston
f0dd568e16 Wait for previous attempts at fetching keys for a given server before trying to fetch more 2015-06-26 11:25:00 +01:00
Erik Johnston
b5f55a1d85 Implement bulk verify_signed_json API 2015-06-26 10:39:34 +01:00
Erik Johnston
5130d80d79 Add bulk insert events API 2015-06-25 17:29:34 +01:00
David Baker
6825eef955 Oops: underride rule had an identifier with override in it. 2015-06-23 14:26:14 +01:00
Erik Johnston
6924852592 Batch SELECTs in _get_auth_chain_ids_txn 2015-06-23 11:01:04 +01:00
Erik Johnston
f043b14bc0 Update change log 2015-06-23 10:22:42 +01:00
Erik Johnston
9c72011fd7 Bump version 2015-06-23 10:12:19 +01:00
Erik Johnston
2f556e0c55 Fix typo 2015-06-19 16:22:53 +01:00
Erik Johnston
7fa1363fb0 Merge pull request #192 from matrix-org/erikj/fix_log_context
Fix log context when sending requests
2015-06-19 16:21:40 +01:00
Erik Johnston
275dab6b55 Merge pull request #190 from matrix-org/erikj/syn-412
Fix notifier leak
2015-06-19 11:49:58 +01:00
Erik Johnston
a68abc79fd Add comment on cancellation of observers 2015-06-19 11:48:55 +01:00
Erik Johnston
653533a3da Fix log context when sending requests 2015-06-19 11:46:49 +01:00
Erik Johnston
9bf61ef97b Merge pull request #189 from matrix-org/erikj/room_init_sync
Improve room init sync speed.
2015-06-19 11:36:06 +01:00
Erik Johnston
0e58d19163 Merge pull request #187 from matrix-org/erikj/sanitize_logging
Sanitize logging
2015-06-19 11:35:59 +01:00
Erik Johnston
18968efa0a Remove stale debug lines 2015-06-19 10:18:02 +01:00
Erik Johnston
eb928c9f52 Add site_tag to logger 2015-06-19 10:16:48 +01:00
Erik Johnston
9d112f4440 Add IDs to outbound transactions 2015-06-19 10:13:03 +01:00
Erik Johnston
ad460a8315 Add Eric Myhre to AUTHORS 2015-06-19 09:23:26 +01:00
Erik Johnston
bf628cf6dd Merge pull request #191 from heavenlyhash/configurable-upload-dir
Make upload dir a configurable path.
2015-06-19 09:16:04 +01:00
Eric Myhre
9e5a353663 Make upload dir a configurable path.
Fixes SYN-425.

Signed-off-by: Eric Myhre <hash@exultant.us>
2015-06-18 23:38:20 -05:00
Erik Johnston
6f6ebd216d PEP8 2015-06-18 17:00:32 +01:00
Erik Johnston
73513ececc Documentation 2015-06-18 16:15:10 +01:00
Erik Johnston
1f24c2e589 Don't bother proxying lookups on _NotificationListener to underlying deferred 2015-06-18 16:09:53 +01:00
Erik Johnston
22049ea700 Refactor the notifier.wait_for_events code to be clearer. Add _NotifierUserStream.new_listener that accepts a token to avoid races. 2015-06-18 15:49:24 +01:00
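A hedged sketch of the race that token argument closes (names illustrative): if the stream advanced between the caller computing its token and registering the listener, fire immediately instead of sleeping through the update.

```python
from twisted.internet import defer

class _NotifierUserStream(object):
    def __init__(self, current_token):
        self.current_token = current_token
        self.listeners = []

    def new_listener(self, token):
        # The caller passes the token it has already seen; if the stream
        # has moved past it, answer at once rather than waiting.
        if self.current_token != token:
            return defer.succeed(self.current_token)
        d = defer.Deferred()
        self.listeners.append(d)
        return d
```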
Erik Johnston
050ebccf30 Fix notifier leak 2015-06-18 11:36:26 +01:00
Kegan Dougal
d88e20cdb9 Fix bug where synapse was sending AS user queries incorrectly.
Bug introduced in 92b20713d7
which reversed the comparison when checking if a user existed
in the users table. Added UTs to prevent this happening again.
2015-06-17 17:26:03 +01:00
Erik Johnston
eceb554a2f Use another deferred list 2015-06-16 17:12:27 +01:00
Erik Johnston
b849a64f8d Use DeferredList 2015-06-16 17:03:24 +01:00
Erik Johnston
0460406298 Don't do unnecessary db ops in presence.get_state 2015-06-16 16:59:38 +01:00
Paul "LeoNerd" Evans
9a3cd1c00d Correct -H SERVER_NAME in config-missing complaint message 2015-06-16 16:03:35 +01:00
Erik Johnston
fb7def3344 Remove access_token from synapse.rest.client.v1.transactions {get,store}_response logging 2015-06-16 10:09:43 +01:00
Erik Johnston
f13890ddce Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sanitize_logging 2015-06-15 18:26:24 +01:00
Erik Johnston
aaa749d366 Disable twisted access logging. Move access logging to SynapseRequest object 2015-06-15 18:18:05 +01:00
Erik Johnston
bc42ca121f Merge pull request #185 from matrix-org/erikj/listeners_config
Change listener config.
2015-06-15 18:05:58 +01:00
Erik Johnston
cee69441d3 Log more when we have processed the request 2015-06-15 17:11:44 +01:00
Erik Johnston
b5209c5744 Create SynapseRequest that overrides __repr__ to not print access_token 2015-06-15 16:37:04 +01:00
Mark Haines
66da8f60d0 Bump the version of twisted needed for setup_requires to 15.2.1 2015-06-15 16:27:20 +01:00
Erik Johnston
f0583f65e1 Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-06-15 15:17:47 +01:00
Erik Johnston
44c9102e7a Merge branch 'erikj/listeners_config' into erikj/sanitize_logging 2015-06-15 14:45:52 +01:00
Erik Johnston
6a7cf6b41f Merge pull request #186 from matrix-org/hotfixes-v0.9.2-r2
Hotfixes v0.9.2-r2
2015-06-15 14:39:46 +01:00
Erik Johnston
2acee97c2b Changelog 2015-06-15 14:20:25 +01:00
Erik Johnston
7f7ec84d6f Bump version 2015-06-15 14:16:29 +01:00
Erik Johnston
cebde85b94 Merge branch 'master' of github.com:matrix-org/synapse into hotfixes-v0.9.2-r2 2015-06-15 14:16:12 +01:00
Erik Johnston
f00f8346f1 Make http.server request logging more verbose, but redact access_tokens 2015-06-15 13:37:58 +01:00
Erik Johnston
83f119a84a Log requests and responses sent via http.client 2015-06-15 13:14:12 +01:00
Erik Johnston
9d0326baa6 Remove redundant newline 2015-06-15 11:27:29 +01:00
Erik Johnston
186f61a3ac Document listener config. Remove deprecated config options 2015-06-15 11:25:53 +01:00
Erik Johnston
fe9bac3749 Merge pull request #184 from matrix-org/hotfixes-v0.9.2-r1
Hotfixes v0.9.2-r1
2015-06-13 12:26:42 +01:00
Erik Johnston
6c01ceb8d0 Bump version 2015-06-13 12:22:14 +01:00
Erik Johnston
2eda996a63 Add a dummy.sql into delta/20 as pip isn't packaging the pushers.py 2015-06-13 12:21:58 +01:00
Matthew Hodgson
4706f3964d Merge pull request #183 from intelfx/install-python-schema-deltas
MANIFEST.in: include python schema delta scripts
2015-06-13 12:09:17 +01:00
Ivan Shapovalov
4df76b0a5d MANIFEST.in: include python schema delta scripts (we now have one in 20/) 2015-06-13 11:08:49 +03:00
Erik Johnston
a005b7269a Add backwards compat support for metrics, manhole and webclient config options 2015-06-12 17:44:23 +01:00
Erik Johnston
261ccd7f5f Fix tests 2015-06-12 17:17:29 +01:00
Erik Johnston
942e39e87c PEP8 2015-06-12 17:13:54 +01:00
Erik Johnston
9c5fc81c2d Correctly handle x_forwarded listener option 2015-06-12 17:13:23 +01:00
Erik Johnston
fd2c07bfed Use config.listeners 2015-06-12 15:33:07 +01:00
Erik Johnston
405f8c4796 Merge branch 'release-v0.9.2' 2015-06-12 11:53:03 +01:00
Erik Johnston
c42ed47660 Fix up create_resource_tree 2015-06-12 11:52:52 +01:00
Erik Johnston
1a87f5f26c Mention config option name 2015-06-12 11:46:41 +01:00
Erik Johnston
a3dc31cab9 s/some/certain 2015-06-12 11:45:13 +01:00
Erik Johnston
4dd47236e7 Update change log 2015-06-12 11:42:52 +01:00
Erik Johnston
295b400d57 Merge branch 'release-v0.9.2' into develop 2015-06-11 16:08:48 +01:00
Erik Johnston
716cf144ec Update change log 2015-06-11 16:07:06 +01:00
Erik Johnston
1e365e88bd Bump schema version 2015-06-11 15:50:39 +01:00
Erik Johnston
2d41dc0069 Bump version 2015-06-11 15:49:19 +01:00
Erik Johnston
f7f07dc517 Begin changing the config format 2015-06-11 15:48:52 +01:00
David Baker
b8690dd840 Catch any exceptions in the pusher loop. Use a lower timeout for pushers so we can see if they're actually still running. 2015-06-05 11:40:22 +01:00
manuroe
378a0f7a79 Federation demo start.sh: Fixed --no-rate-limit param in the script 2015-06-04 17:58:17 +02:00
David Baker
da84946de4 pep8 2015-06-04 16:43:45 +01:00
David Baker
63a7b3ad1e Add script to (re)convert the pushers table to changing the unique key. Also give the python db upgrade scripts the database engine so they can convert parameter strings, and add *args **kwargs to the upgrade function so we can add more args in future and previous scripts will ignore them. 2015-06-04 16:16:01 +01:00
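The forward-compatibility trick described for the python delta scripts, sketched (the SQL and app ID are illustrative only):

```python
def run_upgrade(cur, database_engine, *args, **kwargs):
    # Accepting *args/**kwargs means future versions of the schema runner
    # can pass extra arguments and existing scripts will simply ignore
    # them. The engine is passed in so scripts can convert "?"
    # placeholders into the style the database expects.
    sql = database_engine.convert_param_style(
        "DELETE FROM pushers WHERE app_id = ?"
    )
    cur.execute(sql, ("illustrative.app.id",))
```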
Erik Johnston
5730b20c6d Merge pull request #175 from matrix-org/erikj/thumbnail_thread
Thumbnail images on a separate thread
2015-06-03 17:26:56 +01:00
Erik Johnston
8047fd2434 Merge pull request #176 from matrix-org/erikj/backfill_auth
Improve backfill.
2015-06-03 17:25:37 +01:00
Erik Johnston
3bbd0d0e09 Merge pull request #180 from matrix-org/erikj/prev_state_context
Don't needlessly compute prev_state
2015-06-03 17:20:56 +01:00
Erik Johnston
9dda396baa Merge pull request #179 from matrix-org/erikj/state_group_outliers
Don't compute EventContext for outliers.
2015-06-03 17:20:40 +01:00
Erik Johnston
13ed3b9985 Merge pull request #178 from matrix-org/erikj/cache_state_groups
Add cache to get_state_groups.
2015-06-03 17:20:33 +01:00
Erik Johnston
bd2cf9d4bf Merge pull request #177 from matrix-org/erikj/content_repo_http_client
SYN-403: Make content repository use its own http client.
2015-06-03 17:20:27 +01:00
Erik Johnston
d4902a7ad0 Merge pull request #174 from matrix-org/erikj/compress_option
Add config option to disable compression of http responses
2015-06-03 17:18:17 +01:00
Erik Johnston
55bf90b9e4 Don't needlessly compute prev_state 2015-06-03 16:44:24 +01:00
Erik Johnston
53f0bf85d7 Comment 2015-06-03 16:43:40 +01:00
Erik Johnston
1c3d844e73 Don't needlessly compute context 2015-06-03 16:41:51 +01:00
Erik Johnston
0d7d9c37b6 Add cache to get_state_groups 2015-06-03 14:45:55 +01:00
Erik Johnston
d8866d7277 Caches should be bound to instances.
Before, caches were global and so different instances of the stores
would share caches. This caused problems in the unit tests.
2015-06-03 14:45:17 +01:00
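The difference the commit message describes, in miniature:

```python
class EventStore(object):
    # Problematic form: a class-level dict is shared by every instance,
    # so unit tests that construct two stores see each other's state.
    # _event_cache = {}

    def __init__(self):
        # Bound per instance instead: each store gets its own cache.
        self._event_cache = {}
```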
Erik Johnston
2ef2f6d593 SYN-403: Make content repository use its own http client. 2015-06-03 10:17:37 +01:00
Erik Johnston
3483b78d1a Log where a request came from in federation 2015-06-02 18:15:13 +01:00
Erik Johnston
d3ded420b1 Rephrase log line 2015-06-02 16:30:52 +01:00
Erik Johnston
22716774d5 Don't complain about JSON when warning about content tampering 2015-06-02 16:30:52 +01:00
Erik Johnston
5044e6c544 Thumbnail images on a separate thread 2015-06-02 15:39:08 +01:00
Erik Johnston
09e23334de Add a timeout 2015-06-02 11:00:37 +01:00
Erik Johnston
02410e9239 Handle the fact we might be missing auth events 2015-06-02 10:58:35 +01:00
Erik Johnston
e552b78d50 Add some logging 2015-06-02 10:28:14 +01:00
Erik Johnston
fde0da6f19 Correctly look up auth_events 2015-06-02 10:19:38 +01:00
Erik Johnston
3f04a08a0c Don't process events we've already processed. Remember to process state events 2015-06-02 10:11:32 +01:00
Erik Johnston
4bbfbf898e Correctly pass in auth_events 2015-06-01 17:02:23 +01:00
Erik Johnston
6e17463228 Don't explode if we don't have the event 2015-06-01 16:39:43 +01:00
Erik Johnston
522f285f9b Add config option to disable compression of http responses 2015-06-01 13:36:30 +01:00
Mark Haines
b8d49be5a1 Merge branch 'develop' into markjh/twisted-15
Conflicts:
	synapse/python_dependencies.py
2015-06-01 10:56:05 +01:00
Mark Haines
90abdaf3bc Use Twisted-15.2.1, Use Agent.usingEndpointFactory rather than implement our own Agent 2015-06-01 10:51:50 +01:00
Erik Johnston
b579a8ea18 Merge pull request #172 from intelfx/contrib-systemd
contrib/systemd: log_config.yaml: do not disable existing loggers
2015-05-31 20:53:45 +01:00
Ivan Shapovalov
53ef3a0bfe contrib/systemd: log_config.yaml: do not disable existing loggers
It turned out that merely configuring the root logger is not enough for
"catch-all" semantics. The logging subsystem also needs to be told not
to disable existing loggers (so that their messages will get propagated
to handlers up the logging hierarchy, not just silently discarded).

Signed-off-by: Ivan Shapovalov <intelfx100@gmail.com>
2015-05-31 19:25:21 +03:00
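The same setting in Python terms, for reference: dictConfig defaults disable_existing_loggers to True, which silently disables any logger created before the config is applied.

```python
import logging.config

logging.config.dictConfig({
    "version": 1,
    # Keep loggers that modules have already created; without this their
    # records never propagate up to the root handlers.
    "disable_existing_loggers": False,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "root": {"level": "INFO", "handlers": ["console"]},
})
```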
Mark Haines
d70c847b4f Merge pull request #170 from matrix-org/markjh/SYT-8-recaptcha
Allow endpoint for verifying recaptcha to be configured
2015-05-29 15:32:54 +01:00
Erik Johnston
d15f166093 Remove log line 2015-05-29 15:03:24 +01:00
Erik Johnston
ca580ef862 Don't copy twice 2015-05-29 15:02:55 +01:00
Erik Johnston
45bac68064 Merge pull request #169 from matrix-org/erikj/ultrajson
Use ultrajson when possible. Add option to turn off freezing of events.
2015-05-29 14:58:56 +01:00
Mark Haines
784aaa53df Merge branch 'develop' into markjh/SYT-8-recaptcha
Conflicts:
	synapse/handlers/auth.py
2015-05-29 13:49:44 +01:00
Erik Johnston
8355b4d074 Bump syutil version 2015-05-29 13:08:43 +01:00
Erik Johnston
a7b65bdedf Add config option to turn off freezing events. Use new encode_json api and ujson.loads 2015-05-29 12:17:33 +01:00
Mark Haines
d94590ed48 Add config for setting the recaptcha verify api endpoint, so we can test it in sytest 2015-05-29 12:11:40 +01:00
Erik Johnston
afbd3b2fc4 SYN-395: Fix CAPTCHA, don't double decode json 2015-05-28 18:05:00 +01:00
Erik Johnston
79e37a7ecb Correctly pass connection pool parameter 2015-05-28 16:48:53 +01:00
Erik Johnston
0f118e55db Merge pull request #168 from matrix-org/erikj/conn_pool
Make HTTP clients use connection pools.
2015-05-28 16:03:56 +01:00
Erik Johnston
2f54522d44 Merge pull request #167 from matrix-org/erikj/deep_copy_removal
Remove a deep copy
2015-05-28 16:00:07 +01:00
Erik Johnston
dd74436ffd Unused import 2015-05-28 15:47:20 +01:00
Erik Johnston
11f51e6ded Up maxPersistentPerHost count 2015-05-28 15:45:46 +01:00
Erik Johnston
086df80790 Add connection pooling to SimpleHttpClient 2015-05-28 15:43:21 +01:00
Erik Johnston
291e942332 Use connection pool for federation connections 2015-05-28 15:43:21 +01:00
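A sketch of the Twisted connection-pooling wiring these commits add (the pool size echoes the "Up maxPersistentPerHost count" commit above; the exact number is an assumption):

```python
from twisted.internet import reactor
from twisted.web.client import Agent, HTTPConnectionPool

pool = HTTPConnectionPool(reactor, persistent=True)
# Keep several warm connections per host so back-to-back federation
# requests skip the TCP (and TLS) handshake.
pool.maxPersistentPerHost = 10  # value assumed
agent = Agent(reactor, pool=pool)
```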
Erik Johnston
31ade3b3e9 Remove a deep copy 2015-05-28 13:45:23 +01:00
Erik Johnston
36b3b75b21 Registration should be disabled by default 2015-05-28 11:01:34 +01:00
Erik Johnston
6d1dea337b Merge branch 'release-v0.9.1' of github.com:matrix-org/synapse 2015-05-26 16:03:32 +01:00
Erik Johnston
99eb1172b0 Merge branch 'release-v0.9.1' of github.com:matrix-org/synapse into develop 2015-05-26 16:02:59 +01:00
Erik Johnston
6cb3212fc2 changelog 2015-05-26 16:00:45 +01:00
Mark Haines
554c63ca60 Iterate over the user_streams not the user_ids 2015-05-26 15:03:49 +01:00
Mark Haines
fff7905409 Merge branch 'bugs/SYN-390' into release-v0.9.1 2015-05-26 14:58:49 +01:00
Mark Haines
00dd207f60 Take a dict of the rule, not the rule list 2015-05-26 14:57:48 +01:00
Erik Johnston
e417469af2 changelog 2015-05-26 11:08:46 +01:00
Erik Johnston
cb7dac3a5d changelog 2015-05-26 11:08:09 +01:00
Erik Johnston
2651fd5e24 Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.9.1 2015-05-26 11:05:50 +01:00
Erik Johnston
764856777c changelog 2015-05-26 11:05:44 +01:00
Mark Haines
e7b25a649c Merge pull request #166 from matrix-org/bugs/SYN-390
SYN-390: Don't modify the dictionary returned from the database here either
2015-05-26 10:40:50 +01:00
Mark Haines
804b732aab SYN-390: Don't modify the dictionary returned from the database here either 2015-05-26 10:35:08 +01:00
Erik Johnston
45fffe8cbe Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.9.1 2015-05-26 10:22:41 +01:00
Erik Johnston
9ba3c1ede4 Merge pull request #165 from matrix-org/bugs/SYN-390
SYN-390: Don't modify the dictionary returned from the data store
2015-05-26 10:20:36 +01:00
Mark Haines
a0bebeda8b SYN-390: Don't modify the dictionary returned from the data store 2015-05-26 10:14:15 +01:00
Erik Johnston
27e093cbc1 Bump version 2015-05-22 17:03:37 +01:00
Mark Haines
d9f60e8dc8 Merge pull request #163 from matrix-org/markjh/presence_list_cache
Add a cache for the presence list
2015-05-22 17:02:23 +01:00
Mark Haines
0e42dfbe22 Merge pull request #164 from matrix-org/markjh/pusher_performance_2
Add a cache for get_push rules for user, fix cache invalidation
2015-05-22 17:01:56 +01:00
Mark Haines
5ebd33302f Merge pull request #162 from matrix-org/erikj/backfill_fixes
backfill fixes
2015-05-22 17:01:40 +01:00
Mark Haines
17167898c8 Fix the presence tests 2015-05-22 16:22:54 +01:00
Erik Johnston
6eadbfbea0 Remove redundant for loop 2015-05-22 16:12:20 +01:00
Mark Haines
1a9a9abcc7 Add a cache for getting the presence list for a user 2015-05-22 16:11:17 +01:00
Erik Johnston
74b7de83ec Merge branch 'develop' of github.com:matrix-org/synapse into erikj/backfill_fixes 2015-05-22 16:10:42 +01:00
Mark Haines
36317f3dad Merge pull request #156 from matrix-org/erikj/join_perf
Make joining #matrix:matrix.org over federation quicker
2015-05-22 16:09:54 +01:00
Mark Haines
052ac0c8d0 Merge pull request #159 from matrix-org/erikj/metrics_interface_config
Enable changing the interface the metrics listener binds to
2015-05-22 16:09:33 +01:00
Mark Haines
49a2c10279 Merge pull request #157 from matrix-org/markjh/presence_performance
Improve presence performance in loadtest
2015-05-22 16:04:40 +01:00
Mark Haines
5d53c14342 Merge pull request #160 from matrix-org/markjh/appservice_performance
Make the appservice use 'users_in_room' rather than get_room_members …
2015-05-22 16:04:22 +01:00
Mark Haines
4752a990c8 Merge pull request #161 from matrix-org/erikj/txn_logging_fix
Erikj/txn logging fix
2015-05-22 16:03:52 +01:00
Mark Haines
106a3051b8 Remove spurious TODO comment 2015-05-22 15:53:03 +01:00
Erik Johnston
284f55a7fb Add doc strings 2015-05-22 15:18:04 +01:00
Erik Johnston
1ce1509989 s/metric_interface/metric_bind_host/ 2015-05-22 14:51:22 +01:00
Erik Johnston
8bb85c8c5a Update log line 2015-05-22 14:48:06 +01:00
Mark Haines
c8135f808b Remove unused import 2015-05-22 14:45:46 +01:00
Erik Johnston
b21d015c55 Log origin and stats of incoming transactions 2015-05-22 14:44:25 +01:00
Erik Johnston
e70e8e053e Add txn_id to some log lines 2015-05-22 14:44:02 +01:00
Erik Johnston
1b446a5d85 Log less lines at INFO level, but include more helpful information 2015-05-22 14:29:57 +01:00
Erik Johnston
59a0682f3e Enable changing the interface the metrics listener binds to 2015-05-22 13:13:07 +01:00
Mark Haines
b6adfc59f5 Invalidate the get_latest_event_ids_in_room cache when deleting from event_forward_extremities 2015-05-22 13:01:03 +01:00
Erik Johnston
254aa3c986 Revert register_new_matrix_user to use v1 api 2015-05-22 11:59:48 +01:00
Mark Haines
f43544eecc Make the appservice use 'users_in_room' rather than get_room_members since it is cached 2015-05-22 11:01:28 +01:00
Mark Haines
a04cde613e Add a cache for get_push rules for user, fix cache invalidation 2015-05-22 10:39:45 +01:00
Erik Johnston
4429e720ae Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-05-22 10:33:00 +01:00
Erik Johnston
ee49098843 Changelog 2015-05-21 17:36:52 +01:00
Erik Johnston
51f5d36f4f Merge branch 'hotfixes-v0.9.0-r5' of github.com:matrix-org/synapse 2015-05-21 17:16:10 +01:00
Erik Johnston
f8c2cd129d Bump version 2015-05-21 17:03:30 +01:00
Erik Johnston
f6d1183fc5 Merge branch 'markjh/pusher_performance_master' of github.com:matrix-org/synapse into hotfixes-v0.9.0-r5 2015-05-21 17:02:54 +01:00
Mark Haines
2043527b9b Don't try to use a txn when not in one, remove spurious debug logging 2015-05-21 16:53:03 +01:00
Mark Haines
53447e9cd3 Add caches for things requested by the pushers 2015-05-21 16:41:39 +01:00
Mark Haines
d61ce3f670 Add a cache for get_current_state with state_key 2015-05-21 16:41:39 +01:00
Erik Johnston
a910984b58 Actually return something from lambda 2015-05-21 15:58:41 +01:00
Erik Johnston
e309b1045d Sort backfill events 2015-05-21 15:57:35 +01:00
Erik Johnston
0180bfe4aa Remove dead code 2015-05-21 15:53:41 +01:00
Erik Johnston
1f3d1d85a9 Only get non-state 2015-05-21 15:52:29 +01:00
Erik Johnston
39a3340f73 Skip events we've already seen 2015-05-21 15:48:56 +01:00
Erik Johnston
ae3bff3491 Correctly prepopulate queue 2015-05-21 15:46:07 +01:00
Erik Johnston
dc085ddf8c Don't prepopulate event_results 2015-05-21 15:44:05 +01:00
Erik Johnston
73d23c6ae8 Don't readd things that are already in event_results 2015-05-21 15:40:22 +01:00
Erik Johnston
6189d8e54d PriorityQueue gives lowest first 2015-05-21 15:38:08 +01:00
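The detail behind "PriorityQueue gives lowest first", sketched with invented sample values: to backfill the deepest events first, negate the depth before queueing.

```python
import Queue  # Python 2 stdlib name; "queue" on Python 3

to_fetch = Queue.PriorityQueue()
for event_id, depth in [("$a:hs", 3), ("$b:hs", 7)]:
    # PriorityQueue pops the *smallest* entry first, so negating the
    # depth pulls the deepest events out before shallower ones.
    to_fetch.put((-depth, event_id))

while not to_fetch.empty():
    try:
        neg_depth, event_id = to_fetch.get_nowait()
    except Queue.Empty:  # cf. "Correctly capture Queue.Empty exception"
        break
```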
Erik Johnston
115ef3ddac Correctly capture Queue.Empty exception 2015-05-21 15:37:43 +01:00
Erik Johnston
4fb858d90a Merge branch 'develop' of github.com:matrix-org/synapse into erikj/backfill_fixes 2015-05-21 15:25:54 +01:00
Mark Haines
88f1ea36ce Oops, get_rooms_for_user returns a namedtuple, not a room_id 2015-05-21 15:23:58 +01:00
Erik Johnston
c2633907c5 Merge branch 'erikj/join_perf' of github.com:matrix-org/synapse into erikj/backfill_fixes 2015-05-21 14:58:47 +01:00
Erik Johnston
ebfdd2eb5b Merge branch 'develop' of github.com:matrix-org/synapse into erikj/join_perf 2015-05-21 14:54:52 +01:00
Erik Johnston
a551c5dad7 Merge pull request #155 from matrix-org/erikj/perf
Bulk and batch retrieval of events.
2015-05-21 14:54:40 +01:00
Erik Johnston
27e4b45c06 s/for events/for requests for events/ 2015-05-21 14:52:23 +01:00
Erik Johnston
ac5f2bf9db s/for events/for requests for events/ 2015-05-21 14:50:57 +01:00
Erik Johnston
80a167b1f0 Add comments 2015-05-21 11:19:04 +01:00
Mark Haines
7ae8afb7ef Removed unused 'is_visible' method 2015-05-20 14:48:11 +01:00
Erik Johnston
9118a92862 Split up _get_events into defer and txn versions 2015-05-20 13:27:16 +01:00
Mark Haines
8eca5bd50a Fix the presence tests 2015-05-20 13:22:18 +01:00
Mark Haines
e01b825cc9 Clean up the presence_list checking logic a bit 2015-05-20 13:21:59 +01:00
Erik Johnston
ab45e12d31 Make _get_event_from_row_txn not return a deferred 2015-05-20 13:07:19 +01:00
Erik Johnston
f407cbd2f1 PEP8 2015-05-20 13:02:01 +01:00
Erik Johnston
227f8ef031 Split out _get_event_from_row back into defer and _txn version 2015-05-20 13:00:57 +01:00
Erik Johnston
2bc60c55af Fix _get_backfill_events to return events in the correct order 2015-05-20 12:57:00 +01:00
Erik Johnston
20814fabdd Actually fetch state for new backwards extremities when backfilling. 2015-05-20 11:59:02 +01:00
Erik Johnston
9084cdd70f Ensure event_results is a set 2015-05-19 16:34:31 +01:00
Erik Johnston
5b731178b2 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/join_perf 2015-05-19 16:08:00 +01:00
Erik Johnston
3a653515ec Add None check 2015-05-19 15:27:09 +01:00
Erik Johnston
aa729349dd Fix event_backwards_extrem insertion to ignore outliers 2015-05-19 15:27:00 +01:00
Erik Johnston
5b1631a4a9 Add a timeout param to get_event 2015-05-19 14:53:32 +01:00
Erik Johnston
291cba284b Handle the case when things return empty but non-None results 2015-05-19 14:42:46 +01:00
Erik Johnston
253f76a0a5 Don't always hit get_server_verify_key_v1_direct 2015-05-19 14:42:38 +01:00
Erik Johnston
6837c5edab Handle the case when things return empty but non-None results 2015-05-19 14:27:11 +01:00
Erik Johnston
7223129916 Don't apply new room join hack if depth > 5 2015-05-19 14:16:08 +01:00
Erik Johnston
5ae4a84211 Don't always hit get_server_verify_key_v1_direct 2015-05-19 13:43:34 +01:00
Erik Johnston
118a760719 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/join_perf 2015-05-19 13:20:29 +01:00
David Baker
19505e0392 Disable GZip encoding on static file resources as per comment 2015-05-19 13:20:25 +01:00
Erik Johnston
df431b127b Add forgotten .items() 2015-05-19 13:14:21 +01:00
Erik Johnston
882ac83d8d Fix scripts-dev/convert_server_keys.py to have correct format 2015-05-19 13:12:55 +01:00
Erik Johnston
d3e09f12d0 SYN-383: Actually, we expect this value to be a dict 2015-05-19 13:12:41 +01:00
Erik Johnston
677be13ffc Revert accidental commit 2015-05-19 13:12:28 +01:00
Erik Johnston
350b88656a SYN-383: Actually, we expect this value to be a dict 2015-05-19 13:01:57 +01:00
Erik Johnston
9de94d5a4d Merge branch 'develop' of github.com:matrix-org/synapse into erikj/join_perf 2015-05-19 12:50:17 +01:00
Erik Johnston
2b7120e233 SYN-383: Handle the fact the server might not have signed things 2015-05-19 12:49:38 +01:00
Erik Johnston
8b256a7296 Don't reuse var names 2015-05-19 11:58:22 +01:00
Erik Johnston
62ccc6d95f Don't reuse var names 2015-05-19 11:58:04 +01:00
Erik Johnston
01858bcbf2 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/join_perf 2015-05-19 11:56:35 +01:00
Erik Johnston
2aeee2a905 SYN-383: Fix parsing of verify_keys and catching of _DefGen_Return 2015-05-19 11:56:18 +01:00
Erik Johnston
5e7883ec19 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/join_perf 2015-05-19 10:50:43 +01:00
Mark Haines
c6a03c46e6 SYN-383: Extract the response list from 'server_keys' in the response JSON as it might work better than iterating over the top level dict 2015-05-19 10:23:02 +01:00
Mark Haines
e4c65b338d Speed up the get_pagination_rows as well 2015-05-18 18:21:06 +01:00
Mark Haines
99914ec9f8 Merge pull request #152 from matrix-org/notifier_performance
Notifier performance
2015-05-18 17:49:59 +01:00
Erik Johnston
ef910a0358 Do work in parallel when joining a room 2015-05-18 17:17:04 +01:00
Mark Haines
591c4bf223 Cache the most recent serial for each room 2015-05-18 16:21:51 +01:00
Mark Haines
e1150cac4b Move updating the serial and state of the presence cache into a single function 2015-05-18 15:46:37 +01:00
Erik Johnston
165eb2dbe6 Comments and shuffle of functions 2015-05-18 15:18:41 +01:00
Mark Haines
880fb46de0 Merge branch 'notifier_performance' into markjh/presence_performance 2015-05-18 14:33:58 +01:00
Mark Haines
9396723995 Merge pull request #154 from matrix-org/erikj/events_move
Move get_events functions to storage.events
2015-05-18 14:24:35 +01:00
Erik Johnston
65878a2319 Remove unused metric 2015-05-18 14:06:30 +01:00
Mark Haines
ad31fa3040 Don't bother sorting by the room_stream_ids, it shouldn't matter which order they are notified in 2015-05-18 14:04:58 +01:00
Erik Johnston
4d1b6f4ad1 Remove rejected events if we don't want rejected events 2015-05-18 14:03:46 +01:00
Mark Haines
0b0033c40b Merge branch 'develop' into notifier_performance 2015-05-18 13:50:01 +01:00
Mark Haines
755def8083 Add more doc string, reduce C+P boilerplate for getting room list 2015-05-18 13:46:47 +01:00
Mark Haines
1e90715a3d Make sure the notifier stream token goes forward when it is updated. Sort the pending events by the correct room_stream_id 2015-05-18 13:17:36 +01:00
Erik Johnston
131bdf9bb1 Merge branch 'erikj/events_move' of github.com:matrix-org/synapse into erikj/perf 2015-05-18 10:23:37 +01:00
Erik Johnston
10f1bdb9a2 Move get_events functions to storage.events 2015-05-18 10:21:40 +01:00
Erik Johnston
d5cea26d45 Remove pointless newline 2015-05-18 10:16:45 +01:00
Erik Johnston
c71176858b Newline, remove debug logging 2015-05-18 10:11:14 +01:00
Erik Johnston
f8bd4de87d Remove debug logging 2015-05-18 09:58:03 +01:00
Erik Johnston
c3b37abdfd PEP8 2015-05-15 16:59:58 +01:00
Erik Johnston
6c74fd62a0 Revert limiting of fetching, it didn't help perf. 2015-05-15 16:45:35 +01:00
Erik Johnston
9ff7f66a2b init j 2015-05-15 16:36:03 +01:00
Erik Johnston
70f272f71c Don't completely drain the list 2015-05-15 16:34:17 +01:00
Erik Johnston
8763dd80ef Don't fetch prev_content for current_state 2015-05-15 15:33:01 +01:00
Erik Johnston
807229f2f2 Err, defer.gatherResults ftw 2015-05-15 15:20:29 +01:00
Erik Johnston
acb12cc811 Make store.get_current_state fetch events asyncly 2015-05-15 15:20:05 +01:00
Erik Johnston
d62dee7eae Remove more debug logging 2015-05-15 15:06:37 +01:00
Erik Johnston
0f29cfabc3 Remove debug logging 2015-05-15 14:06:42 +01:00
Erik Johnston
e275a9c0d9 preserve log context 2015-05-15 11:54:51 +01:00
Erik Johnston
aa32bd38e4 Add a wait 2015-05-15 11:35:04 +01:00
Erik Johnston
372d4c6d7b Srsly. Don't use closures. Baaaaaad 2015-05-15 11:26:00 +01:00
Erik Johnston
575ec91d82 Correctly pass through params 2015-05-15 11:15:10 +01:00
Mark Haines
10be983f2c Merge pull request #153 from matrix-org/markjh/presence_docstring
Add some doc strings for presence.
2015-05-15 11:11:47 +01:00
Mark Haines
415b158ce2 More whitespace 2015-05-15 11:09:47 +01:00
Erik Johnston
de01438a57 Sort out error handling 2015-05-15 11:00:50 +01:00
Erik Johnston
a2c4f3f150 Fix deadlock 2015-05-15 10:54:04 +01:00
Mark Haines
0a4330cd5d Add some missed argument types, cleanup the whitespace a bit 2015-05-14 17:48:12 +01:00
Mark Haines
47ec693e29 More doc-strings 2015-05-14 17:07:02 +01:00
Erik Johnston
1d566edb81 Remove race condition 2015-05-14 16:54:35 +01:00
David Baker
6e1ad283cf Support gzip encoding for client, client v2 and web client resources (SYN-176). 2015-05-14 16:39:19 +01:00
Erik Johnston
ef3d8754f5 Call from right thread 2015-05-14 15:41:55 +01:00
Erik Johnston
142934084a Count and loop 2015-05-14 15:40:21 +01:00
Erik Johnston
96c5b9f87c Don't start up more fetch_events 2015-05-14 15:36:04 +01:00
Erik Johnston
7cd6a6f6cf Awful idea for speeding up fetching of events 2015-05-14 15:34:02 +01:00
Mark Haines
c5d1b4986b Remove unused arguments and doc PresenceHandler.push_update_to_clients 2015-05-14 14:59:31 +01:00
Erik Johnston
7f4105a5c9 Turn off preemptive transactions 2015-05-14 14:51:06 +01:00
Erik Johnston
f4d58deba1 PEP8 2015-05-14 14:45:42 +01:00
Erik Johnston
386b7330d2 Move from _base to events 2015-05-14 14:45:22 +01:00
Mark Haines
0ad1c67234 Add some doc-strings to notifier 2015-05-14 14:35:07 +01:00
Erik Johnston
7d6a1dae31 Jump out early 2015-05-14 14:27:58 +01:00
Erik Johnston
656223fbd3 Actually, we probably want to run this in a transaction 2015-05-14 14:26:35 +01:00
David Baker
67800f7626 Treat setting your display name to the empty string as removing it (SYN-186). 2015-05-14 14:19:59 +01:00
Erik Johnston
2f7f8e1c2b Preemptively jump into a transaction if we ask for get_prev_content 2015-05-14 14:17:36 +01:00
Mark Haines
4770cec7bc Merge pull request #150 from matrix-org/notifier_unify
Make v1 and v2 client APIs interact with the notifier in the same way.
2015-05-14 14:16:59 +01:00
Erik Johnston
e1e9f0c5b2 loop -> gatherResults 2015-05-14 13:58:49 +01:00
Erik Johnston
ab78a8926e Err, we probably want a bigger limit 2015-05-14 13:47:16 +01:00
Erik Johnston
f6f902d459 Move fetching of events into their own transactions 2015-05-14 13:45:48 +01:00
Erik Johnston
cdb3757942 Refactor _get_events 2015-05-14 13:31:55 +01:00
David Baker
92e1c8983d Disallow whitespace in aliases here too 2015-05-14 13:21:55 +01:00
David Baker
0c894e1ebd Throw error when creating room if alias contains whitespace #SYN-335 2015-05-14 13:11:28 +01:00
Mark Haines
084c365c3a Use the current token when timing out a notifier, make sure the user_id is a string in on_new_user_event 2015-05-14 12:03:26 +01:00
David Baker
c37a6e151f Make shared secret registration work again 2015-05-14 12:03:13 +01:00
Erik Johnston
36ea26c5c0 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/perf 2015-05-14 12:01:38 +01:00
David Baker
7c549dd557 Add ID generator for push_rules_enable to #resolve SYN-378 2015-05-14 11:44:03 +01:00
Mark Haines
899d4675dd Merge branch 'notifier_unify' into notifier_performance 2015-05-14 11:36:44 +01:00
Mark Haines
243c56e725 Merge branch 'develop' into notifier_unify 2015-05-14 11:36:23 +01:00
Mark Haines
3edd2d5c93 Fix v2 sync, update the last_notified_ms only if there was an active listener 2015-05-14 11:25:30 +01:00
David Baker
47fb089eb5 Specify python 2.7 in the virtualenv setup (SYN-319) #resolved 2015-05-14 10:23:10 +01:00
Erik Johnston
4f1d984e56 Add index on events 2015-05-13 17:22:26 +01:00
Mark Haines
5e0c533672 Fix metric counter 2015-05-13 17:20:28 +01:00
Erik Johnston
968b01a91a Actually use async method 2015-05-13 17:02:46 +01:00
Erik Johnston
4071f29653 Fetch events from events_id in their own transactions 2015-05-13 16:59:41 +01:00
Mark Haines
f1b83d88a3 Discard unused NotifierUserStreams 2015-05-13 16:54:02 +01:00
Erik Johnston
a988361aea Typo 2015-05-13 15:44:15 +01:00
Erik Johnston
8888982db3 Don't insert None 2015-05-13 15:43:32 +01:00
Mark Haines
9af432257d Don't set a timer if there's already a result to return 2015-05-13 15:42:13 +01:00
Erik Johnston
cf706cc6ef Don't return None 2015-05-13 15:31:25 +01:00
Erik Johnston
5971d240d4 Limit batch size 2015-05-13 15:26:49 +01:00
Erik Johnston
ca4f458787 Fetch events in bulk 2015-05-13 15:13:42 +01:00
Mark Haines
df6db5c802 Don't bother checking for new events from a source if the stream token hasn't advanced for that source 2015-05-13 15:08:24 +01:00
Erik Johnston
6edff11a88 Don't fetch redaction and rejection stuff for each event, so we can use index only scan 2015-05-13 14:39:05 +01:00
Mark Haines
63878c0379 Don't bother checking for updates if the stream token hasn't advanced for a user 2015-05-13 13:42:21 +01:00
Erik Johnston
02590c3e1d Temp turn off checking for rejections and redactions 2015-05-13 11:31:28 +01:00
Erik Johnston
619a21812b defer.gatherResults loop 2015-05-13 11:29:03 +01:00
Erik Johnston
fec4485e28 Batch fetching of events for state groups 2015-05-13 11:22:42 +01:00
Erik Johnston
409bcc76bd Load events for state group separately 2015-05-13 11:13:31 +01:00
Mark Haines
cffe6057fb Merge branch 'notifier_unify' into notifier_performance
Conflicts:
	synapse/notifier.py
2015-05-12 16:37:50 +01:00
Erik Johnston
80fd2b574c Don't talk to yourself when backfilling 2015-05-12 16:19:46 +01:00
Erik Johnston
e122685978 You need to call contextmanager 2015-05-12 16:12:37 +01:00
Mark Haines
54ef09f860 Merge pull request #151 from matrix-org/revert-147-presence-performance
Revert "Improvement to performance of presence event stream handling"
2015-05-12 15:44:55 +01:00
Mark Haines
d7b3ac46f8 Revert "Improvement to performance of presence event stream handling" 2015-05-12 15:44:21 +01:00
Mark Haines
4429e4bf24 Merge branch 'develop' into notifier_unify
Conflicts:
	synapse/notifier.py
2015-05-12 15:31:26 +01:00
Mark Haines
ec07dba29e Merge pull request #143 from matrix-org/erikj/SYN-375
SYN-375 - Lots of unhandled deferred exceptions.
2015-05-12 15:25:54 +01:00
Mark Haines
c167cbc9fd Merge pull request #147 from matrix-org/presence-performance
Improvement to performance of presence event stream handling
2015-05-12 15:24:54 +01:00
Mark Haines
a6fb2aa2a5 Merge pull request #144 from matrix-org/erikj/logging_context
Preserving logging contexts
2015-05-12 15:23:50 +01:00
Mark Haines
1fce36b111 Merge pull request #149 from matrix-org/erikj/backfill
Backfill support
2015-05-12 15:20:32 +01:00
Erik Johnston
8b28209c60 Err, delete the right stuff 2015-05-12 15:02:53 +01:00
Erik Johnston
30c72d377e Newlines 2015-05-12 14:47:40 +01:00
Erik Johnston
e4eddf9b36 We do actually want to delete rows out of event_backward_extremities 2015-05-12 14:47:23 +01:00
Erik Johnston
c1779a79bc Fix up _handle_prev_events to not try to insert duplicate rows 2015-05-12 14:41:50 +01:00
Erik Johnston
74850d7f75 Do state groups persistence /after/ checking if we have already persisted the event 2015-05-12 14:14:58 +01:00
Erik Johnston
07a1223156 s/backfil/backfill/ 2015-05-12 14:09:54 +01:00
Erik Johnston
0d31ad5101 Typos everywhere 2015-05-12 14:02:01 +01:00
Erik Johnston
a0dfffb33c And another typo. 2015-05-12 14:00:31 +01:00
Erik Johnston
6e5ac4a28f Err, gatherResults doesn't take a dict... 2015-05-12 13:58:14 +01:00
Erik Johnston
8022b27fc2 Make distributer.fire work as it did 2015-05-12 13:14:48 +01:00
Erik Johnston
95dedb866f Unwrap defer.gatherResults failures 2015-05-12 13:14:29 +01:00
Mark Haines
78672a9fd5 Merge branch 'notifier_unify' into notifier_performance 2015-05-12 13:11:54 +01:00
Erik Johnston
da6a7bbdde Merge branch 'develop' of github.com:matrix-org/synapse into erikj/logging_context 2015-05-12 13:10:42 +01:00
Mark Haines
2551b6645d Update the end_token correctly, otherwise the token doesn't advance and the client gets duplicate events 2015-05-12 11:54:18 +01:00
Mark Haines
5e4ba463b7 Merge branch 'develop' into notifier_unify 2015-05-12 11:41:53 +01:00
Mark Haines
51da995806 Merge pull request #148 from matrix-org/bugs/SYN-377
SYN-377: Make sure that the event is marked as persisted from the main thread.
2015-05-12 11:36:44 +01:00
Mark Haines
5002056b16 SYN-377: Make sure that the StreamIdGenerator.get_next.__exit__ is called from the main thread after the transaction completes, not from the database thread before the transaction completes. 2015-05-12 11:20:40 +01:00
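In usage terms, the required ordering looks like this (a hedged sketch with hypothetical names, in the Twisted style of the codebase):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def persist(event, store, stream_id_gen):
        with stream_id_gen.get_next() as stream_id:
            # the write itself runs on a database thread
            yield store.persist_event(event, stream_id=stream_id)
        # __exit__ has now run here, on the main thread, only after the
        # transaction has committed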
Mark Haines
5c75adff95 Add a NotifierUserStream to hold all the notification listeners for a user 2015-05-12 11:00:37 +01:00
Erik Johnston
367382b575 Handle the case where the other side is unreachable when backfilling 2015-05-12 10:35:45 +01:00
Erik Johnston
4df11b5039 Make get_current_token accept a direction parameter, which tells the source whether we want a token for going 'forwards' or 'backwards' 2015-05-12 10:28:10 +01:00
Erik Johnston
84e6b4001f Initial hack at wiring together pagination and backfill 2015-05-11 18:01:31 +01:00
Erik Johnston
17653a5dfe Move storage.stream._StreamToken to types.RoomStreamToken 2015-05-11 18:01:01 +01:00
Mark Haines
e269c511f6 Don't bother passing the events to the notifier since it isn't using them 2015-05-11 15:01:51 +01:00
Mark Haines
5e3b254dc8 Use wait_for_events to implement 'get_events' 2015-05-11 14:37:33 +01:00
Erik Johnston
d244fa9741 Merge branch 'hotfixes-v0.9.0-r4' of github.com:matrix-org/synapse into develop 2015-05-11 13:34:31 +01:00
Erik Johnston
e89ca34e0e Merge branch 'hotfixes-v0.9.0-r4' of github.com:matrix-org/synapse 2015-05-11 13:16:11 +01:00
Erik Johnston
79b7154454 Merge pull request #146 from matrix-org/erikj/push_rules_fixes
Fix 500 on push rule updates.
2015-05-11 11:33:47 +01:00
Erik Johnston
4ef556f650 Bump version 2015-05-11 11:31:04 +01:00
Erik Johnston
b036596b75 Prefer to use _simple_*. 2015-05-11 11:24:01 +01:00
Erik Johnston
cd525c0f5a push_rules table expects an 'id' field 2015-05-11 11:24:01 +01:00
Mark Haines
3c224f4d0e SYN-376: Add script for converting server keys from v1 to v2 2015-05-11 11:00:17 +01:00
Erik Johnston
d38862a080 Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-05-10 10:56:46 +01:00
David Baker
2640d6718d Merge pull request #145 from matrix-org/hotfixes-v0.9.0-r3
Hotfixes v0.9.0 r3
2015-05-10 10:54:44 +01:00
Erik Johnston
de87541862 Bump version 2015-05-10 10:51:08 +01:00
Erik Johnston
22d2f498fa Fix push rule bug: can't insert bool into small int column 2015-05-10 10:50:51 +01:00
Matthew Hodgson
d79ffa1898 typo 2015-05-09 14:45:37 +01:00
Erik Johnston
2236ef6c92 Fix up leak. Add warnings. 2015-05-08 19:53:34 +01:00
Erik Johnston
da1aa07db5 Add some docs 2015-05-08 16:52:49 +01:00
Erik Johnston
4ac1941592 PEP8 2015-05-08 16:33:01 +01:00
Erik Johnston
476899295f Change the way we do logging contexts so that they survive divergences 2015-05-08 16:32:18 +01:00
Erik Johnston
fca28d243e Change the way we create observers to deferreds so that we don't get spammed by 'unhandled errors' 2015-05-08 16:28:08 +01:00
Erik Johnston
37feb4031f Merge branch 'hotfixes-v0.9.0-r2' of github.com:matrix-org/synapse 2015-05-08 16:13:15 +01:00
Erik Johnston
0cd1401f8d Bump version 2015-05-08 16:11:51 +01:00
Erik Johnston
724bb1e7d9 Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-05-08 16:11:19 +01:00
Mark Haines
1c7912751e Drop the old table not the new table 2015-05-08 16:04:32 +01:00
Mark Haines
9d36eb4eab Rename unique constraint 2015-05-08 16:01:55 +01:00
Mark Haines
b0f71db3ff Remove unsigned 2015-05-08 15:59:51 +01:00
Mark Haines
84e1cacea4 Bump schema version 2015-05-08 15:58:14 +01:00
Mark Haines
6538d445e8 Make the timestamps in server_keys_json bigints 2015-05-08 15:55:17 +01:00
Erik Johnston
52f98f8a5b Merge branch 'hotfixes-v0.9.0-r1' of github.com:matrix-org/synapse 2015-05-08 14:25:18 +01:00
Erik Johnston
22a7ba8b22 Actually rename all instances 2015-05-08 13:50:03 +01:00
Erik Johnston
3a42f32134 Reword port script usage 2015-05-08 13:47:48 +01:00
Erik Johnston
4fa0f53521 Support reading directly from a config 2015-05-08 13:45:58 +01:00
Erik Johnston
326121aec4 UPGRADES: s/v0.x.x/v0.9.0 2015-05-08 13:38:29 +01:00
Erik Johnston
9a9386226a Mention Ivan Shapovalov's contrib/systemd 2015-05-08 13:37:54 +01:00
Erik Johnston
126d562576 Bump version 2015-05-08 13:29:37 +01:00
Erik Johnston
f08c33e834 Fix port_from_sqlite_to_postgres after changes to storage layer. 2015-05-08 13:29:00 +01:00
Paul "LeoNerd" Evans
45543028bb Use the presence cachemap ordering to early-abort the iteration loop 2015-05-07 22:40:10 +01:00
Paul "LeoNerd" Evans
f683b5de47 Store presence cachemap in an ordered dict, so that the newer serials will be at the end 2015-05-07 21:27:53 +01:00
Erik Johnston
db0dca2f6f Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-05-07 19:21:00 +01:00
Erik Johnston
89c0cd4acc Merge branch 'release-v0.9.0' of github.com:matrix-org/synapse 2015-05-07 19:07:00 +01:00
Erik Johnston
6101ce427a Slight rewording 2015-05-07 18:58:28 +01:00
Erik Johnston
5fe26a9b5c Reword docs/application_services.rst 2015-05-07 18:54:53 +01:00
Erik Johnston
35698484a5 Add some information on registering AS's 2015-05-07 18:51:09 +01:00
Erik Johnston
63562f6d5a Bump date 2015-05-07 18:20:13 +01:00
Erik Johnston
a151693a3b Bump syweb version 2015-05-07 18:01:46 +01:00
Erik Johnston
ac29318b84 Add link to registration spec 2015-05-07 17:58:50 +01:00
Mark Haines
dfa98f911b revert accidental bcrypt gensalt round reduction from loadtesting 2015-05-07 17:45:42 +01:00
Erik Johnston
4605953b0f Add JIRA issue id 2015-05-07 16:53:18 +01:00
Mark Haines
ef8e8ebd91 pynacl-0.3.0 was released so we can finally start using it directly from pypi 2015-05-07 16:46:51 +01:00
Erik Johnston
3188e94ac4 Explain the change in AS /register api 2015-05-07 16:12:02 +01:00
David Baker
97a64f3ebe Merge branch 'develop' of github.com:matrix-org/synapse into develop 2015-05-07 09:33:42 +01:00
David Baker
b850c9fa04 Typo 2015-05-07 09:33:30 +01:00
Mark Haines
4a7a4a5b6c Optional profiling using cProfile 2015-05-06 17:08:00 +01:00
Erik Johnston
771fc05d30 Change log: Link to application services spec. 2015-05-06 13:59:32 +01:00
Erik Johnston
938939fd89 Move CAPTCHA_SETUP to docs/ 2015-05-06 13:48:06 +01:00
Erik Johnston
028a570e17 Linkify docs/postgres.sql 2015-05-06 13:42:40 +01:00
Erik Johnston
0e4393652f Update change log to be more detailed 2015-05-06 13:31:59 +01:00
Mark Haines
b994fb2b96 Don't read from the config file before checking it exists 2015-05-06 12:56:47 +01:00
Erik Johnston
f10fd8a470 Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.9.0 2015-05-06 12:54:36 +01:00
Mark Haines
3c11c9c122 Merge pull request #140 from matrix-org/erikj/scripts_refactor
Separate scripts/ into scripts/ and scripts-dev/
2015-05-06 12:54:07 +01:00
Erik Johnston
673375fe2d Actually add scripts-dev/ 2015-05-06 11:46:02 +01:00
Erik Johnston
3c92231094 Re-add scripts/register_new_matrix_user 2015-05-06 11:45:18 +01:00
Erik Johnston
119e5d7702 Separate scripts/ into scripts/ and scripts-dev/, where scripts/* are automatically added to the package 2015-05-06 11:41:19 +01:00
Erik Johnston
271ee604f8 Update change log 2015-05-06 11:29:54 +01:00
Erik Johnston
04c01882fc Bump version 2015-05-06 09:59:13 +01:00
Mark Haines
f4664a6cbd Merge pull request #138 from matrix-org/erikj/SYN-371
SYN-371 - Failed to persist event
2015-05-05 18:53:15 +01:00
Mark Haines
ecb26beda5 Merge pull request #137 from matrix-org/erikj/executemany
executemany support
2015-05-05 18:30:35 +01:00
Erik Johnston
0c4ac271ca Merge branch 'erikj/executemany' of github.com:matrix-org/synapse into erikj/SYN-371 2015-05-05 18:21:19 +01:00
Erik Johnston
0cf7e480b4 And use buffer(...) there as well 2015-05-05 18:20:01 +01:00
Erik Johnston
ed2584050f Merge branch 'develop' of github.com:matrix-org/synapse into erikj/executemany 2015-05-05 18:15:20 +01:00
Erik Johnston
977338a7af Use buffer(...) when inserting into bytea column 2015-05-05 18:12:53 +01:00
Mark Haines
31049c4d72 Merge pull request #139 from matrix-org/bugs/SYN-369
Fix race with cache invalidation. SYN-369
2015-05-05 17:46:13 +01:00
Mark Haines
deb0237166 Add some doc-string 2015-05-05 17:45:11 +01:00
Mark Haines
e45b05647e Fix the --help option for synapse 2015-05-05 17:39:59 +01:00
Erik Johnston
3d5a955e08 Missed events are not outliers 2015-05-05 17:36:57 +01:00
Mark Haines
d18f37e026 Collect the invalidate callbacks on the transaction object rather than passing around a separate list 2015-05-05 17:32:21 +01:00
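The shape of that pattern, sketched with hypothetical names (the real code hangs the callbacks off the transaction object as described):

    class Transaction(object):
        # Wraps a DB transaction, collecting cache invalidations to run
        # only after a successful commit.
        def __init__(self, conn):
            self.conn = conn
            self.after_commit = []

        def call_after(self, fn, *args):
            self.after_commit.append((fn, args))

    def run_transaction(txn, func):
        func(txn)          # may call txn.call_after(cache.invalidate, key)
        txn.conn.commit()
        for fn, args in txn.after_commit:
            fn(*args)      # invalidations fire only once the txn is durable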
Erik Johnston
9951542393 Add a comment about the zip(*[zip(sorted(...),...)]) 2015-05-05 17:06:55 +01:00
Mark Haines
041b6cba61 SYN-369: Add comments to the sequence number logic in the cache 2015-05-05 16:32:44 +01:00
Mark Haines
63075118a5 Add debug flag in synapse/storage/_base.py for debugging the cache logic by comparing what is in the cache with what was in the database on every access 2015-05-05 16:24:04 +01:00
Erik Johnston
531d7955fd Don't insert without deduplication. In this case we never actually use this table, so simply remove the insert entirely 2015-05-05 16:12:28 +01:00
Mark Haines
bfa4a7f8b0 Invalidate the room_member cache if the current state events updates 2015-05-05 15:43:49 +01:00
Mark Haines
d0fece8d3c Missing return for when the event was already persisted 2015-05-05 15:39:09 +01:00
Erik Johnston
bdcd7693c8 Fix indentation 2015-05-05 15:14:48 +01:00
Erik Johnston
43c2e8deae Add support for using executemany 2015-05-05 15:13:25 +01:00
Erik Johnston
1692dc019d Don't call 'encode_parameter' no-op 2015-05-05 15:00:30 +01:00
Mark Haines
a9aea68fd5 Invalidate the caches from the correct thread 2015-05-05 14:57:08 +01:00
Mark Haines
261d809a47 Sequence the modifications to the cache so that selects don't race with inserts 2015-05-05 14:13:50 +01:00
Erik Johnston
d9cc5de9e5 Correctly name transaction 2015-05-05 10:24:10 +01:00
Erik Johnston
b8940cd902 Remove some unused indexes 2015-05-01 16:14:25 +01:00
Erik Johnston
1942382246 Don't log enqueue_ 2015-05-01 16:14:25 +01:00
David Baker
eb9bd2d949 user_id now in user_threepids 2015-05-01 15:04:37 +01:00
Erik Johnston
2d386d7038 That wasn't a deferred 2015-05-01 14:41:25 +01:00
Erik Johnston
4ac2823b3c Remove inlineCallbacks from non-generator 2015-05-01 14:41:25 +01:00
Erik Johnston
22c7c5eb8f Typo 2015-05-01 14:41:25 +01:00
Erik Johnston
42c12c04f6 Remove some run_on_reactors 2015-05-01 14:41:25 +01:00
Erik Johnston
adb5b76ff5 Don't log all auth events every time we call auth.check 2015-05-01 14:41:25 +01:00
Mark Haines
3bcdf3664c Use the daemonize key from the config if it exists 2015-05-01 14:34:55 +01:00
David Baker
9eeb03c0dd Don't use self.execute: it's designed for fetching stuff 2015-05-01 14:21:25 +01:00
Mark Haines
32937f3ea0 database config is not kept in separate config file anymore 2015-05-01 14:06:54 +01:00
Mark Haines
7b50769eb9 Merge pull request #136 from matrix-org/markjh/config_cleanup
Config restructuring.
2015-05-01 14:04:39 +01:00
David Baker
7693f24792 No id field on user 2015-05-01 13:55:42 +01:00
Mark Haines
46a65c282f Allow generate-config to run against an existing config file to generate default keys 2015-05-01 13:54:38 +01:00
David Baker
92b20713d7 More missed get_user_by_id API changes 2015-05-01 13:45:54 +01:00
Erik Johnston
da4ed08739 One too many lens 2015-05-01 13:29:38 +01:00
Erik Johnston
9060dc6b59 Change public room list to use defer.gatherResults 2015-05-01 13:28:36 +01:00
David Baker
1fae1b3166 This api now no longer returns an array 2015-05-01 13:26:41 +01:00
Erik Johnston
80b4119279 Don't wait for storage of access_token 2015-05-01 13:14:05 +01:00
Erik Johnston
4011cf1c42 Cache latest_event_ids_in_room 2015-05-01 13:06:26 +01:00
Erik Johnston
657298cebd Don't lock user_ips table for upsert. 2015-05-01 10:46:48 +01:00
Erik Johnston
fabb7acd45 Fix bug where we reconnected to the database on every query. 2015-05-01 10:24:24 +01:00
Erik Johnston
23c639ff32 Split a storage function in two so that we don't have to do extra work. 2015-05-01 10:17:19 +01:00
Erik Johnston
8be5284e91 Remove pointless join 2015-04-30 18:49:26 +01:00
Erik Johnston
503e4d3d52 Fix broken SQL 2015-04-30 18:44:47 +01:00
Erik Johnston
00718ae7a9 Need more yield 2015-04-30 18:43:39 +01:00
Erik Johnston
0465560c1a Add missing param 2015-04-30 18:42:44 +01:00
Erik Johnston
61d05daab1 More join conditions 2015-04-30 18:41:05 +01:00
Erik Johnston
6ead27ddda Add more conditions on JOINs to make postgres go a little faster. 2015-04-30 18:32:03 +01:00
Mark Haines
50c87b8eed Allow "manhole" to be omitted from the config 2015-04-30 18:11:47 +01:00
Mark Haines
345995fcde Remove the ~, comment the lines instead 2015-04-30 18:10:19 +01:00
Mark Haines
62cebee8ee Update key.py 2015-04-30 17:54:01 +01:00
Mark Haines
95cbfee8ae Update metrics.py 2015-04-30 17:52:20 +01:00
Mark Haines
4ad8350607 Update README.rst 2015-04-30 17:48:29 +01:00
Mark Haines
6ea9cf58be missing import 2015-04-30 17:21:21 +01:00
Erik Johnston
f383d5a801 Fix up get_current_state and get_room_name_and_aliases queries to parse events in transaction 2015-04-30 17:12:52 +01:00
Mark Haines
c95480963e read the pid_file from the config file in synctl 2015-04-30 17:12:15 +01:00
Mark Haines
069296dbb0 Can't specify bind-port on the cmdline anymore 2015-04-30 17:08:07 +01:00
Mark Haines
2d4d2bbae4 Merge branch 'develop' into markjh/config_cleanup
Conflicts:
	synapse/config/captcha.py
2015-04-30 16:54:55 +01:00
Mark Haines
2f1348f339 Write a default log_config when generating config 2015-04-30 16:52:57 +01:00
Erik Johnston
69d4063651 Add get_rooms_for_user cache 2015-04-30 16:47:51 +01:00
David Baker
5b02f33451 Undo changes to logger config, i.e. remove the access_log_file option: the decision is to support this through log_config rather than adding an option. 2015-04-30 16:21:39 +01:00
David Baker
054aa0d58c Do access log using python's logging stuff, just under a separate logger name 2015-04-30 16:21:38 +01:00
Erik Johnston
3c4c229788 Don't use sub queries, it makes postgres sad 2015-04-30 16:16:53 +01:00
Mark Haines
74aaacf82a Don't break when sizes or durations are given as integers 2015-04-30 16:04:02 +01:00
Mark Haines
29400b45b9 SYN-367: Use upsert rather than insert_or_replace 2015-04-30 15:21:31 +01:00
Mark Haines
c28f1d16f0 Add a random string to the auto generated key id 2015-04-30 15:13:14 +01:00
Mark Haines
265f30bd3f Allow --enable-registration to be passed on the commandline 2015-04-30 15:04:06 +01:00
Mark Haines
c9e62927f2 Use disable_registration keys if they are present 2015-04-30 14:34:09 +01:00
Erik Johnston
2366d28780 Don't needlessly join on state_events 2015-04-30 14:02:06 +01:00
David Baker
d89a9f7283 Add an access_log
SYN-161 #resolve
2015-04-30 13:58:13 +01:00
Mark Haines
1aa11cf7ce Allow multiple config files, set up a default config before applying the config files 2015-04-30 13:48:15 +01:00
David Baker
0c1b7f843b Unused import 2015-04-30 13:33:30 +01:00
David Baker
4b46fbec5b Doesn't look like this is used anymore 2015-04-30 12:04:08 +01:00
Erik Johnston
1d7702833d Make a simple query rather than a long one that throws away half the results 2015-04-30 10:16:12 +01:00
Mark Haines
6b69ddd17a remove duplicate parse_size method 2015-04-30 04:26:29 +01:00
Mark Haines
d624e2a638 Manually generate the default config yaml, remove most of the commandline arguments for synapse, anticipating that people will use the yaml instead. Simplify implementing config options by not requiring the classes to hit the super class 2015-04-30 04:24:44 +01:00
Erik Johnston
b1ca784aca Correctly decode, for sqlite and postgres, rows from pushers table 2015-04-29 19:41:14 +01:00
Erik Johnston
4a9dc5b2f5 'pushkey' values are also bytes. 2015-04-29 19:27:02 +01:00
Erik Johnston
0ade2712d1 Typo 2015-04-29 19:17:25 +01:00
Erik Johnston
50f96f256f Also remove yield from within lock in the other generator 2015-04-29 19:17:00 +01:00
Erik Johnston
d2d61a8288 Fix deadlock in id_generators. No idea why this was an actual deadlock. 2015-04-29 19:15:23 +01:00
Erik Johnston
3e71d13acf Also log when we've started pushers 2015-04-29 18:37:42 +01:00
Erik Johnston
e7a6edb0ee Revert previous 2015-04-29 18:37:30 +01:00
Erik Johnston
c27d6ad6b5 Only start pushers when synapse has fully started 2015-04-29 18:25:24 +01:00
Erik Johnston
46daf2d200 Start pushers on reactor thread 2015-04-29 18:22:20 +01:00
Erik Johnston
3864b3a8e6 Actually return rows 2015-04-29 18:07:36 +01:00
Erik Johnston
0618978238 Typo, args wrong way round 2015-04-29 18:04:35 +01:00
Erik Johnston
09177f4f2e Decode buffers in same thread 2015-04-29 18:03:42 +01:00
Erik Johnston
472be88674 We store pusher data as bytes 2015-04-29 17:43:46 +01:00
Erik Johnston
a6e62cf6d0 Fix off by one in presence token handling 2015-04-29 17:37:11 +01:00
David Baker
12d381bd5d Decode the data json in the storage layer (was moved but this part was missed) 2015-04-29 17:13:51 +01:00
David Baker
f8c30faf25 Oops, update the constraint too 2015-04-29 16:58:42 +01:00
David Baker
61cd5d9045 Be more postgressive 2015-04-29 16:57:14 +01:00
David Baker
fb95035a65 Be postgressive 2015-04-29 16:53:41 +01:00
David Baker
4669def000 Oops, forgot the schema delta file 2015-04-29 16:50:16 +01:00
Erik Johnston
0337eaf321 txn.execute doesn't return cursors 2015-04-29 16:43:39 +01:00
Erik Johnston
884fb88e28 txn.execute doesn't return cursors 2015-04-29 16:35:20 +01:00
Erik Johnston
d76c058eea Fix invalid SQL to work in postgres land 2015-04-29 16:30:25 +01:00
David Baker
9927170787 Accept camelcase + underscores in binding too 2015-04-29 15:57:09 +01:00
David Baker
109c8aafd2 Fix includes 2015-04-29 15:45:44 +01:00
David Baker
b7788f80a3 Accept both camelcase and underscore threepid creds for transition 2015-04-29 15:41:29 +01:00
Erik Johnston
c8ed9bd278 pushers table requires an access_token 2015-04-29 15:33:27 +01:00
Mark Haines
f2d90d5c02 Fix whitespace 2015-04-29 14:53:23 +01:00
Mark Haines
845b0b2c97 Check requirements before doing anything else when running the homeserver 2015-04-29 14:52:42 +01:00
Mark Haines
c0036ced54 bump syutil to 0.0.6 2015-04-29 14:16:41 +01:00
Erik Johnston
970a9b9d2b We can't use REPLACE when upgrading databases now that we have postgres 2015-04-29 13:55:44 +01:00
Erik Johnston
64991b0c8b Merge pull request #129 from matrix-org/key_distribution
Key distribution v2
2015-04-29 13:34:38 +01:00
Mark Haines
e26a3d8d9e bump database schema version 2015-04-29 13:32:32 +01:00
Mark Haines
1319905d7a Use a defer.gatherResults to collect results from the perspective servers 2015-04-29 13:31:14 +01:00
Mark Haines
a9549fdce3 Use bytea rather than BLOB 2015-04-29 13:16:09 +01:00
Mark Haines
4ad8b45155 Merge branch 'develop' into key_distribution
Conflicts:
	synapse/config/homeserver.py
2015-04-29 13:15:14 +01:00
Mark Haines
19167fd21f Merge pull request #135 from matrix-org/erikj/postgres_charset_check
Check that postgres database has correct charset set
2015-04-29 13:04:22 +01:00
Matthew Hodgson
9c4ea42e79 minimal doc 2015-04-29 12:22:20 +01:00
Mark Haines
74874ffda7 Update the query format used by keyring to match current key v2 spec 2015-04-29 12:14:08 +01:00
Erik Johnston
cd0864121b Make postgres database error slightly more helpful 2015-04-29 12:12:25 +01:00
Erik Johnston
4932a7e2d9 Use __all__ instead of assert to stop pyflakes from warning about unused import in __init__ 2015-04-29 12:12:25 +01:00
Matthew Hodgson
0baf923153 Merge pull request #134 from intelfx/contrib-systemd
contrib/systemd: add a sample systemd unit file and a logger configuration
2015-04-29 12:10:02 +01:00
Mark Haines
9894da6a29 Merge branch 'develop' into erikj/postgres_charset_check 2015-04-29 11:58:06 +01:00
Mark Haines
46d200a3a1 Implement minimum_valid_until_ts in the remote key resource 2015-04-29 11:57:26 +01:00
Erik Johnston
72443572bf Mention that postgres databases must have the correct charset encoding 2015-04-29 11:50:33 +01:00
Erik Johnston
a08bf11138 Appease PEP8 2015-04-29 11:44:48 +01:00
Erik Johnston
204132a998 Check that postgres database has correct charset set 2015-04-29 11:42:28 +01:00
Mark Haines
f4c9ebbc34 Delete ugly commented out log line. 2015-04-29 11:07:13 +01:00
Ivan Shapovalov
45278eaa19 contrib/systemd: add a sample systemd unit file and a logger configuration
The added logger configuration (--log-config or log_config:) uses
systemd's python bindings to pass messages directly to the journal.

Signed-off-by: Ivan Shapovalov <intelfx100@gmail.com>
2015-04-29 03:01:56 +03:00
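For illustration, the journal hand-off amounts to something like the following (a sketch assuming the python-systemd bindings; the shipped contrib/systemd files are the authoritative version):

    import logging
    from systemd.journal import JournalHandler

    handler = JournalHandler(SYSLOG_IDENTIFIER="synapse")
    handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)  # log records now go straight to journald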
Matthew Hodgson
478e511db0 improve postgres blurb a bit 2015-04-29 00:48:07 +01:00
Matthew Hodgson
68c0603946 comment out ugly test logline 2015-04-29 00:14:44 +01:00
Matthew Hodgson
e3005d3ddb mention silviof's dockerfile 2015-04-29 00:14:29 +01:00
Matthew Hodgson
cc5d68f4c4 general clean up. s/alpha/beta/g. add intelfx's AUR package for Arch. s/the homeserver/Synapse/g. move installation & running sections closer together. 2015-04-29 00:00:24 +01:00
Erik Johnston
cc52f02d74 Fix rst 2015-04-28 18:09:20 +01:00
Erik Johnston
3151afee9e Update docs/postgres.rst to explain port script usage 2015-04-28 17:59:27 +01:00
Erik Johnston
f41a9a1ffc Add better help to scripts/port_from_sqlite_to_postgres.py 2015-04-28 17:42:36 +01:00
Erik Johnston
1783c7ca92 Ensure we never miss any presence updates 2015-04-28 17:24:24 +01:00
Erik Johnston
0126ef7f3c Fix typo 2015-04-28 17:23:53 +01:00
Erik Johnston
d98edb548a Ensure the serial returned by presence is always an integer 2015-04-28 17:20:32 +01:00
Erik Johnston
9fbcf19188 Merge pull request #123 from matrix-org/postgres
Postgres
2015-04-28 14:24:08 +01:00
Erik Johnston
073b891ec1 Remove unused imports 2015-04-28 13:44:23 +01:00
Erik Johnston
327ca883ec Merge branch 'develop' of github.com:matrix-org/synapse into postgres 2015-04-28 13:39:42 +01:00
Erik Johnston
a1d4813a54 Quickly fix dodgy est. time remaining 2015-04-28 12:55:29 +01:00
Erik Johnston
18f8247701 Use TEXT instead of VARCHAR(n), since PostgreSQL treats them the same except for a limit 2015-04-28 12:41:33 +01:00
Erik Johnston
af27b84ff7 Correctly handle total/remaining counts in the presence of the sent_transactions table 2015-04-28 12:40:04 +01:00
Erik Johnston
4a13ae7201 Correctly handle total/remaining counts in the presence of the sent_transactions table 2015-04-28 11:16:44 +01:00
Mark Haines
9182f87664 Merge pull request #126 from matrix-org/csauth
Client / Server Auth Refactor
2015-04-28 11:00:27 +01:00
Mark Haines
55e1bc8920 And don't bump the schema version unnecessarily 2015-04-28 10:54:15 +01:00
Mark Haines
55fcf62e9c Merge pull request #133 from matrix-org/invite_power_level
Invite power level
2015-04-28 10:50:34 +01:00
Mark Haines
b96c133034 Add server_keys.sql to the current delta rather than creating a new delta 2015-04-28 10:50:00 +01:00
Erik Johnston
ce8b0b2868 Remove accidentally committed debug hardcode hack 2015-04-28 10:45:05 +01:00
Mark Haines
252e6f6869 Merge pull request #130 from matrix-org/server_rename_check
Fix for SYN-266
2015-04-28 10:39:31 +01:00
Erik Johnston
1ccaea5b92 Typo in port script 2015-04-28 10:34:06 +01:00
Mark Haines
0bc71103e1 Output vim style mode markers into the yaml config file 2015-04-28 10:17:10 +01:00
Mark Haines
f8b865264a Merge branch 'develop' into key_distribution
Conflicts:
	synapse/crypto/keyring.py
2015-04-27 18:29:32 +01:00
Erik Johnston
5b8b1a43bd Split setting up and processing of tables 2015-04-27 17:53:40 +01:00
Erik Johnston
40cbd6b6ee Shuffle progress stuff 2015-04-27 17:53:15 +01:00
Erik Johnston
4e49f52375 Don't port over all of the sent_transactions table 2015-04-27 17:36:37 +01:00
Paul "LeoNerd" Evans
38432d8c25 Merge branch 'develop' into invite_power_level 2015-04-27 17:09:25 +01:00
Erik Johnston
42b7139dec Remove unused import 2015-04-27 15:59:56 +01:00
Erik Johnston
1ef66cc3bd Move database configuration into config module 2015-04-27 15:57:43 +01:00
Erik Johnston
416a3e6c4f Ensure check_same_thread is enabled for sqlite3 2015-04-27 15:44:30 +01:00
Erik Johnston
8558e1ec73 Make get_max_token into inlineCallbacks so that the lock works. 2015-04-27 15:19:44 +01:00
Erik Johnston
56f518d279 Add docs on how to use synapse with psycopg2 2015-04-27 14:53:35 +01:00
Erik Johnston
6f8e2d517e Merge branch 'develop' of github.com:matrix-org/synapse into postgres 2015-04-27 14:41:40 +01:00
Erik Johnston
1c1d67dfef Merge pull request #132 from matrix-org/observer_and_locks
Observer and locks
2015-04-27 14:41:14 +01:00
Erik Johnston
2c70849dc3 Fix newlines 2015-04-27 14:38:29 +01:00
Erik Johnston
0a016b0525 Pull inner function out. 2015-04-27 14:37:24 +01:00
Erik Johnston
e701aec2d1 Implement locks using create_observer for fetching media and server keys 2015-04-27 14:20:26 +01:00
David Baker
412ece18e7 Add commentage. 2015-04-27 14:08:45 +01:00
Erik Johnston
1c82fbd2eb Implement create_observer.
`create_observer` takes a deferred and creates a new deferred that
*observes* the original deferred. Any callbacks added to the observing
deferred will *not* affect the original deferred.
2015-04-27 13:59:37 +01:00
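A minimal sketch of the idea (a hypothetical simplification, not the exact helper that landed):

    from twisted.internet import defer

    def create_observer(deferred):
        # Return a new Deferred that fires with the same result as
        # `deferred`, while keeping the observer's callback chain separate
        # from the original's.
        observer = defer.Deferred()

        def callback(result):
            observer.callback(result)
            return result  # pass the original result through unchanged

        def errback(failure):
            observer.errback(failure)
            return failure  # the original chain still sees the failure

        deferred.addCallbacks(callback, errback)
        return observer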
Erik Johnston
2732be83d9 Shuffle operations so that locking upsert happens last in the txn. This ensures the lock is held for the least amount of time possible. 2015-04-27 13:22:30 +01:00
Erik Johnston
e4c4664d73 Handle the fact that postgres databases can be restarted from under us 2015-04-27 12:40:49 +01:00
David Baker
03c4f0ed67 pep8 2015-04-27 12:36:59 +01:00
David Baker
f1acb9fd40 logging args 2015-04-27 11:56:34 +01:00
David Baker
8a5be236e0 pep8 2015-04-27 11:49:18 +01:00
David Baker
df75914791 pep8 2015-04-27 11:48:33 +01:00
David Baker
b02e1006b9 Run database check before daemonizing, at the cost of database hygiene. 2015-04-27 11:46:00 +01:00
David Baker
f8152f2708 rename db method to be more informative 2015-04-27 10:16:26 +01:00
David Baker
2f475bd5d5 pep8 2015-04-24 18:15:07 +01:00
David Baker
a7b51f4539 Check users in our table aren't on a different domain to the one we're configured with to try & fix SYN-266 2015-04-24 18:11:21 +01:00
Mark Haines
288702170d Add config for setting the perspective servers 2015-04-24 17:01:34 +01:00
David Baker
f46eee838a Add note about updating your signing keys (ie. "the auto thing") 2015-04-24 15:25:28 +01:00
David Baker
a654f3fe49 Matrix ID server is now HTTPS 2015-04-24 15:07:24 +01:00
David Baker
44ccfa6258 Remove ancient history 2015-04-24 15:05:56 +01:00
David Baker
04d1725752 Pedant: OS X has a space 2015-04-24 15:01:14 +01:00
David Baker
1bac74b9ae Change to https for ID server communication 2015-04-24 14:48:49 +01:00
David Baker
ed83638668 Make one-to-one rule an underride, otherwise bings don't work in one-to-one rooms. Likewise a couple of other rules. 2015-04-24 14:27:17 +01:00
David Baker
7ac8a60c6f More underscores 2015-04-24 11:44:27 +01:00
Mark Haines
c253b14f6e Merge branch 'develop' into key_distribution 2015-04-24 11:29:46 +01:00
Mark Haines
bdcb23ca25 Fix spelling 2015-04-24 11:29:19 +01:00
Mark Haines
b2c2dc8940 Merge branch 'develop' into key_distribution 2015-04-24 11:28:10 +01:00
Mark Haines
869dc94cbb Call the super classes when generating config 2015-04-24 11:27:56 +01:00
David Baker
a218619626 Use underscores instead of camelcase for id server stuff 2015-04-24 11:27:38 +01:00
Mark Haines
b1e68add19 Add a config file for perspective servers 2015-04-24 11:26:19 +01:00
Mark Haines
31e262e6b4 Copyright notice 2015-04-24 10:36:51 +01:00
Mark Haines
eede182df7 Merge branch 'develop' into key_distribution 2015-04-24 10:35:49 +01:00
Mark Haines
4e2f8b8722 Copyright notices 2015-04-24 10:35:29 +01:00
Mark Haines
c8c710eca7 Move the key related config parser into a separate file 2015-04-24 10:22:22 +01:00
Mark Haines
149ed9f151 Better help for the old-signing-key option 2015-04-24 10:07:55 +01:00
David Baker
f7a79a37be pep8 2015-04-24 09:42:37 +01:00
David Baker
6532b6e607 Merge branch 'develop' into csauth
Conflicts:
	synapse/http/server.py
2015-04-24 09:37:54 +01:00
David Baker
74270defda No commas here, otherwise our error string constants become tuples. 2015-04-24 09:28:57 +01:00
Paul "LeoNerd" Evans
e1e5e53127 Remove users from the remote_offline_serials list (and clean up empty elements) when they go online again 2015-04-23 19:01:37 +01:00
Paul "LeoNerd" Evans
b3bda8a75f Don't let the remote offline serial list grow arbitrarily large 2015-04-23 18:40:47 +01:00
Paul "LeoNerd" Evans
8a785c3006 Store a list of the presence serial numbers at which remote users went offline, so that when we delete them from the cachemap, we can still synthesize OFFLINE events for them (SYN-261) 2015-04-23 18:40:19 +01:00
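A rough sketch of the bookkeeping implied here (hypothetical layout, not the committed code):

    # Newest-first list of (serial, set_of_user_ids) recording when remote
    # users went offline, kept so that entries evicted from the cachemap
    # can still yield synthesized OFFLINE events.
    remote_offline_serials = []

    def record_offline(serial, user_id):
        remote_offline_serials.insert(0, (serial, set([user_id])))

    def offline_users_since(from_serial):
        return [user_id
                for serial, user_ids in remote_offline_serials
                if serial > from_serial
                for user_id in user_ids]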
Paul "LeoNerd" Evans
191f7f09ce Generate presence event-stream JSON structures directly 2015-04-23 18:27:25 +01:00
David Baker
03eb4adc6e Dedicated error code for failed 3pid auth verification 2015-04-23 18:20:17 +01:00
Mark Haines
4bbf7156ef Update to match the specification for key/v2 2015-04-23 16:39:13 +01:00
Paul "LeoNerd" Evans
6d15401341 Mumble ReST mumble `fixed-width` mumble 2015-04-23 16:16:08 +01:00
Paul "LeoNerd" Evans
8c78414284 Formatting / wording fixes to metrics doc 2015-04-23 16:14:08 +01:00
Matthew Hodgson
6c99491347 prometheus/metrics howto from Leo 2015-04-23 16:08:08 +01:00
David Baker
0eb61a3d16 Remove ultimately unused feature of saving params from the first call in the session: it's probably too open to abuse. 2015-04-23 14:44:12 +01:00
David Baker
a2c10d37d7 Add an error code to 'missing token' response. 2015-04-23 13:23:44 +01:00
David Baker
2e0d9219b9 Remove now-redundant email config 2015-04-23 11:45:29 +01:00
Mark Haines
f30d47c876 Implement remote key lookup api 2015-04-22 14:21:08 +01:00
Paul "LeoNerd" Evans
a16eaa0c33 Neater fetching of user's auth level in a room - squash to int() at access time (SYN-353) 2015-04-22 14:20:04 +01:00
Paul "LeoNerd" Evans
f43063158a Appease pep8 2015-04-22 13:12:11 +01:00
Kegsay
7c50e3b816 Add info on breaking AS API changes 2015-04-22 08:38:26 +01:00
Paul "LeoNerd" Evans
2808c040ef Also remember to check 'invite' level for changes 2015-04-21 21:13:14 +01:00
Paul "LeoNerd" Evans
48b6ee2b67 Create an 'invite' powerlevel when making new rooms 2015-04-21 21:07:35 +01:00
Paul "LeoNerd" Evans
bc41f0398f Initial implementation of an 'invite' power_level 2015-04-21 20:56:08 +01:00
Paul "LeoNerd" Evans
d3309933f5 Much neater fetching of defined powerlevels from m.room.power_levels state event 2015-04-21 20:53:23 +01:00
Paul "LeoNerd" Evans
b568c0231c Remove debugging print statement accidentally committed 2015-04-21 20:21:14 +01:00
Paul "LeoNerd" Evans
3a7d7a3f22 Sanitise a user's powerlevel to an int() before numerical comparison, because otherwise Python is "helpful" with it (SYN-351) 2015-04-21 20:18:29 +01:00
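The "helpfulness" in question is Python 2's silent cross-type ordering; a quick illustration (Python 3 raises TypeError here instead):

    >>> "25" > 100        # Python 2: mixed types order by type name,
    True                  # so any str compares greater than any int
    >>> int("25") > 100   # the intended numeric comparison
    False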
Mark Haines
3ba522bb23 Merge branch 'develop' into key_distribution 2015-04-21 17:10:25 +01:00
Mark Haines
6080830bef Bump syutil version to 0.0.5 2015-04-21 17:04:06 +01:00
Mark Haines
8b183781cb Merge pull request #128 from matrix-org/unify_http_wrappers
Unify http wrappers
2015-04-21 17:01:04 +01:00
Mark Haines
1f651ba6ec Merge pull request #127 from matrix-org/bugs/SYN-350
SYN-350: Don't ratelimit the events generated during create room
2015-04-21 16:52:59 +01:00
Mark Haines
812a99100b Set a version_string in BaseMediaResource so that the request_handler wrapper works 2015-04-21 16:43:58 +01:00
Mark Haines
1967650bc4 Combine the request wrappers in rest/media/v1 and http/server into a single wrapper decorator 2015-04-21 16:35:53 +01:00
Mark Haines
1ebff9736b Split out the JsonResource request logging and error handling into a separate wrapper function 2015-04-21 16:07:20 +01:00
Mark Haines
24d21887ed SYN-350: Don't ratelimit the individual events generated during room creation 2015-04-21 14:14:19 +01:00
Mark Haines
db8d4e8dd6 Merge branch 'develop' into key_distribution 2015-04-20 16:24:21 +01:00
Mark Haines
2f9157b427 Implement v2 key lookup 2015-04-20 16:23:47 +01:00
David Baker
91c8f828e1 pep8 2015-04-17 19:56:04 +01:00
David Baker
8db6832db8 Password reset, finally. 2015-04-17 19:53:47 +01:00
David Baker
117f35ac4a Add endpoint to get threepids from server 2015-04-17 17:20:18 +01:00
David Baker
4eea5cf6c2 pep8 2015-04-17 16:46:45 +01:00
David Baker
f96ab9d18d make add3pid servlet work 2015-04-17 16:44:49 +01:00
Erik Johnston
865398b4a9 Revert needless change to rest.profile 2015-04-17 16:14:51 +01:00
Erik Johnston
e3417bbbe0 Revert needless change to storage.profile 2015-04-17 16:13:14 +01:00
Erik Johnston
2492efb162 Merge branch 'develop' of github.com:matrix-org/synapse into postgres 2015-04-17 16:08:42 +01:00
Erik Johnston
4a5990ff8f Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-04-17 16:08:28 +01:00
Erik Johnston
5e7a90316d Update --database-path metavar to SQLITE_DATABASE_PATH 2015-04-17 16:08:18 +01:00
Erik Johnston
231498ac45 Merge pull request #124 from matrix-org/hotfixes-v0.8.1-r4
Hotfixes v0.8.1-r4
2015-04-17 15:16:27 +01:00
Mark Haines
fd4fa9097f The new parameter to urlopen is "context" not "ctx" 2015-04-17 14:44:31 +01:00
David Baker
0b1a8500a2 just the once would probably be fine 2015-04-17 13:53:54 +01:00
David Baker
cb03fafdf1 Merge branch 'develop' into csauth 2015-04-17 13:51:10 +01:00
David Baker
bf5e54f255 Register the 3pid servlet 2015-04-17 13:44:55 +01:00
David Baker
94e1e58b4d password -> account servlet and add start of an 'add 3pid' endpoint 2015-04-17 13:44:12 +01:00
Erik Johnston
ced39d019f Bump version 2015-04-17 13:25:16 +01:00
Erik Johnston
16dcdedc8a As of version 2.7.9, urllib2 now checks SSL certs 2015-04-17 13:24:55 +01:00
David Baker
83b554437e Need to yield the username check, otherwise very very weird things happen. 2015-04-17 12:57:25 +01:00
Erik Johnston
dfc46c6220 PEP8 2015-04-17 12:46:29 +01:00
Erik Johnston
6ba2e3df4e Merge branch 'develop' of github.com:matrix-org/synapse into postgres 2015-04-17 11:22:31 +01:00
Erik Johnston
427bcb7608 Fix port script after storage changes. Add very simple (off by default) curses UI to see progress. 2015-04-17 11:13:05 +01:00
Erik Johnston
0ec346d942 Add unique index to room_aliases, remove duplicates on upgrade. Convert some columns back to TEXT from bytea 2015-04-17 11:10:20 +01:00
Kegan Dougal
1352ae2c07 Add kick users script 2015-04-17 10:38:23 +01:00
David Baker
4cd5fb13a3 Oops, left debugging in. 2015-04-16 20:03:13 +01:00
David Baker
ea1776f556 Return user ID in use error straight away 2015-04-16 19:56:44 +01:00
Erik Johnston
e1c0970c11 PEP8 2015-04-16 11:18:45 +01:00
Erik Johnston
b8092fbc82 Go back to storing JSON in TEXT 2015-04-16 11:17:52 +01:00
Erik Johnston
bc9e69e160 Move encoding and decoding of JSON into storage layer 2015-04-16 11:01:09 +01:00
Paul "LeoNerd" Evans
f2cf37518b Filter typing notification events to only those rooms the requesting user is a member of (SYN-328) 2015-04-15 23:34:16 +01:00
Paul "LeoNerd" Evans
04c7f3576e Various minor fixes to unit-test structure around typing notifications 2015-04-15 23:32:11 +01:00
Paul "LeoNerd" Evans
0268d40281 Have TypingNotificationEventSource.get_new_events_for_user() return a deferred, for consistency and extensibility 2015-04-15 23:09:35 +01:00
Paul "LeoNerd" Evans
399b5add58 Neater implementation of membership change auth checks, ensuring we can't forget to check if the calling user is a member of the room 2015-04-15 18:40:23 +01:00
Paul "LeoNerd" Evans
e6e130b9ba Ensure that non-room-members cannot ban others, even if they do have enough powerlevel (SYN-343) 2015-04-15 18:07:33 +01:00
David Baker
766bd8e880 Dummy login so we can do the first POST request to get login flows without it just succeeding 2015-04-15 17:14:25 +01:00
Erik Johnston
ffad75bd62 Remove mysql/maria support 2015-04-15 17:00:50 +01:00
Mark Haines
a429515bdd Add methods for storing and retrieving the raw key json 2015-04-15 16:58:35 +01:00
Mark Haines
8d761134c2 Fail quicker for 4xx responses in the key client, optional hit a different API path 2015-04-15 16:57:58 +01:00
Erik Johnston
cf04cedf21 Change full_schemas/11 to work with postgres 2015-04-15 16:53:47 +01:00
Erik Johnston
5b31afcbd1 Remove debug logging 2015-04-15 16:27:04 +01:00
Erik Johnston
6e91f14d09 Add missing yield 2015-04-15 16:25:07 +01:00
Erik Johnston
ed26e4012b pushers table requires a unique id. 2015-04-15 16:24:14 +01:00
Erik Johnston
806f380a8b Make LruCache thread safe, as its used for event cache 2015-04-15 16:07:59 +01:00
David Baker
a19b739909 Registration with email in v2 2015-04-15 15:50:38 +01:00
Erik Johnston
a5c72780e6 Don't pass in removed flag 2015-04-15 15:13:22 +01:00
Erik Johnston
e19f794fee Change from exception to warn 2015-04-15 15:12:57 +01:00
Erik Johnston
d5ff9effcf Don't wait on federation_handler.handle_new_event 2015-04-15 15:05:57 +01:00
Erik Johnston
e845434028 Remove run_on_reactor()s 2015-04-15 15:05:45 +01:00
Erik Johnston
4af32a2817 Postgres does not allow you to continue using a cursor after a DB exception has been raised, so move the _simple_insert or_ignore flag out of the transaction 2015-04-15 14:51:21 +01:00
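The behaviour being worked around, in miniature (assumes a psycopg2 connection and a hypothetical table foo with a unique id column):

    import psycopg2

    conn = psycopg2.connect("dbname=synapse")
    txn = conn.cursor()
    txn.execute("INSERT INTO foo (id) VALUES (1)")
    try:
        txn.execute("INSERT INTO foo (id) VALUES (1)")  # duplicate key
    except psycopg2.IntegrityError:
        pass
    # The transaction is now aborted: every further statement raises
    # until a rollback, so INSERT-or-ignore can't be emulated by simply
    # catching the error mid-transaction.
    txn.execute("SELECT 1")  # raises psycopg2.InternalError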
Erik Johnston
bc6cef823f Do more parallelization for initialSync 2015-04-15 14:21:59 +01:00
Erik Johnston
1ec6fa98c9 Parallelize initial sync 2015-04-15 14:17:16 +01:00
Erik Johnston
25d2914fba Turn off persistance of sent_transactions.response_json 2015-04-15 14:09:35 +01:00
Erik Johnston
cce5d057d3 Add index on events (topological_ordering, stream_ordering) to help with ORDER BY clauses 2015-04-15 11:43:25 +01:00
Erik Johnston
6606f7c659 Merge branch 'develop' of github.com:matrix-org/synapse into postgres 2015-04-15 10:27:20 +01:00
Erik Johnston
a971fa9d58 Use try..finally in contextlib.contextmanager 2015-04-15 10:25:43 +01:00
Erik Johnston
ded4128965 Use True for True rather than 1 2015-04-15 10:24:24 +01:00
Erik Johnston
f9e12f79ca Add missing yield in storage func 2015-04-15 10:24:07 +01:00
Erik Johnston
c756dfeb14 Correctly identify deadlocks 2015-04-15 10:23:42 +01:00
Erik Johnston
63677d1f47 Change port script to work with postgres 2015-04-15 10:23:24 +01:00
Mark Haines
32e14d8181 Return a sha256 fingerprint rather than the entire tls certificate 2015-04-14 19:10:09 +01:00
Mark Haines
4847a9534d Merge pull request #122 from matrix-org/upgrade_syutil_to_0.0.4
Update syutil version to 0.0.4
2015-04-14 16:36:36 +01:00
Mark Haines
88cb06e996 Update syutil version to 0.0.4 2015-04-14 16:18:17 +01:00
Mark Haines
d488463fa3 Add a version 2 of the key server api 2015-04-14 16:04:52 +01:00
Erik Johnston
127fad17dd Add postgres database engine 2015-04-14 14:50:29 +01:00
Erik Johnston
5a95cd4442 Rename user_ips.user -> user_id 2015-04-14 13:54:09 +01:00
Erik Johnston
58d8339966 Add support for postgres instead of mysql. Change sql accordingly. blob + varbinary -> bytea. No support for UNSIGNED or CREATE INDEX IF NOT EXISTS. 2015-04-14 13:53:20 +01:00
Mark Haines
be7ead6946 Merge pull request #121 from matrix-org/key-api-restructure
Move server key api into rest/key/v1
2015-04-14 13:37:15 +01:00
Mark Haines
3cbc286d06 Move server key api into rest/key/v1 2015-04-14 13:28:11 +01:00
Erik Johnston
3c741682e5 Correctly increment the _next_id initially 2015-04-14 09:54:44 +01:00
Erik Johnston
86fc9b617c For backwards compat, make state_groups.id have a type of int, not varchar 2015-04-13 17:03:49 +01:00
Erik Johnston
90bcb86957 Support running porting script multiple times 2015-04-13 13:45:04 +01:00
Erik Johnston
1bede47843 Add beginnings of migration sqlite->maria db script 2015-04-10 18:47:20 +01:00
Erik Johnston
93937b2b31 Remove duplicate rows 2015-04-10 18:47:03 +01:00
Erik Johnston
c5365dee56 Use case sensitive collations 2015-04-10 18:46:33 +01:00
Erik Johnston
4103b1c470 DROP indexes before recreating them 2015-04-10 18:46:09 +01:00
Erik Johnston
4d5b098626 Use LONGBLOB and TEXT for arbitrary length rows 2015-04-10 18:45:09 +01:00
Erik Johnston
7ed2ec3061 Handle the fact that in sqlite binary data might be stored as unicode or bytes 2015-04-10 13:41:54 +01:00
Erik Johnston
ce797ad373 Bump schema version 2015-04-10 11:22:30 +01:00
Erik Johnston
7e863c51e6 Use unsigned bigint 2015-04-10 11:22:04 +01:00
Erik Johnston
0f12772e32 SQLite wants INTEGER and not BIGINT for primary keys 2015-04-10 11:16:09 +01:00
Erik Johnston
d5d4281647 Update full_schemas/16 to match delta files. Add delta/16 scripts 2015-04-10 10:59:46 +01:00
Erik Johnston
cda4a6f93f Revert non-trivial changes to upgrade scripts 2015-04-10 10:19:50 +01:00
Erik Johnston
e2722f58ee Fix schema again 2015-04-10 10:16:29 +01:00
Erik Johnston
a1665c5094 Revert non-trivial schema changes and move them to a new schema version. 2015-04-10 10:05:44 +01:00
Erik Johnston
2ded344620 Remove unused import 2015-04-09 13:46:06 +01:00
Erik Johnston
9707acfc40 Remove spurious spaces 2015-04-09 13:45:20 +01:00
Erik Johnston
8bf285e082 Merge branch 'develop' of github.com:matrix-org/synapse into mysql 2015-04-09 13:13:26 +01:00
Mark Haines
cf59d68b17 Merge pull request #120 from matrix-org/bugs/SYN-339
SYN-339: Cancel the notifier timeout when the notifier fires
2015-04-09 11:45:13 +01:00
Mark Haines
1280a47fc6 Add comment 2015-04-09 11:42:21 +01:00
Mark Haines
23d285ad57 Unset the timer in the timeout callback so that we don't try to cancel it if it has been called 2015-04-09 11:41:50 +01:00
Erik Johnston
8ad0f4912e Stream ordering and out-of-order insertions.
Handle the fact that events can be persisted out of order, and so
getting the "current max" stream token becomes non-trivial - we need to
make sure that *all* stream tokens less than the current max have also
successfully been persisted.
2015-04-09 11:41:36 +01:00
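A sketch of the bookkeeping this implies (simplified, with hypothetical names; not the class the commit adds):

    import threading
    from contextlib import contextmanager

    class StreamIdTracker(object):
        # Hands out increasing stream ids, but only advances the "current
        # max" once every smaller id has finished persisting.
        def __init__(self, current=0):
            self._lock = threading.Lock()
            self._next = current + 1
            self._unfinished = []  # ids handed out, not yet persisted
            self._current_max = current

        @contextmanager
        def get_next(self):
            with self._lock:
                stream_id = self._next
                self._next += 1
                self._unfinished.append(stream_id)
            try:
                yield stream_id  # caller persists the event with this id
            finally:
                with self._lock:
                    self._unfinished.remove(stream_id)
                    if self._unfinished:
                        # the smallest outstanding id bounds the visible max
                        self._current_max = self._unfinished[0] - 1
                    else:
                        self._current_max = self._next - 1

        def get_current_token(self):
            with self._lock:
                return self._current_max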
Mark Haines
6f9dea7483 SYN-339: Cancel the notifier timeout when the notifier fires 2015-04-09 11:07:20 +01:00
Erik Johnston
22d7a59306 Fix tests after commit 9a0579 2015-04-08 16:57:14 +01:00
Erik Johnston
279a547a8b Use generic db exceptions rather than sqlite3 specific ones 2015-04-08 16:53:48 +01:00
Mark Haines
83f5125d52 Merge pull request #114 from matrix-org/improve_get_event_cache
Improve get event cache
2015-04-08 16:50:27 +01:00
Erik Johnston
a43b40449b Merge branch 'develop' of github.com:matrix-org/synapse into mysql 2015-04-08 16:46:56 +01:00
Erik Johnston
9cef051ce2 Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-04-08 16:43:17 +01:00
Erik Johnston
0575840866 Merge pull request #119 from matrix-org/hotfixes-v0.8.1-r3
Hotfixes v0.8.1 r3
2015-04-08 16:41:45 +01:00
Erik Johnston
45131b2bca Bump version 2015-04-08 16:35:12 +01:00
Erik Johnston
ccda401dbf SYN-338: Fix typo that caused the cache to throw an exception in some instances 2015-04-08 16:34:23 +01:00
Erik Johnston
5b89052d2f Merge pull request #118 from matrix-org/dont-wait-for-notify
Don't yield on notifying all listeners
2015-04-08 14:12:36 +01:00
Erik Johnston
3887350e47 Merge branch 'develop' of github.com:matrix-org/synapse into mysql 2015-04-08 14:11:07 +01:00
Erik Johnston
19234cc6c3 typo 2015-04-08 14:10:06 +01:00
Erik Johnston
e8f1521605 Don't yield on notifying all listeners 2015-04-08 14:08:30 +01:00
Mark Haines
605941ee26 Merge pull request #117 from matrix-org/notifier-leak
Fix a notifier leak
2015-04-08 14:02:37 +01:00
Erik Johnston
5bc41fe9f8 Move comment into docstring 2015-04-08 14:01:22 +01:00
Erik Johnston
638be5a6b9 Factor out loops into '_discard_if_notified' 2015-04-08 13:58:32 +01:00
Erik Johnston
830d07db82 Also perform paranoia checks in 'on_new_user_event' 2015-04-08 13:40:20 +01:00
Erik Johnston
65f5e4e3e4 Add paranoia checks to make sure that we evict stale NotificationListeners when we are about to process them 2015-04-08 13:33:38 +01:00
Erik Johnston
07d4041709 Fix bug where we didn't inform the NotificationListeners about new rooms they have been subscribed to. This meant that the listeners didn't clean themselves up fully from all the dicts 2015-04-08 13:33:38 +01:00
Erik Johnston
c1b34af441 Move database timer logging to separate logger 2015-04-08 13:31:19 +01:00
Erik Johnston
9a05795619 Retry transaction, not SQL query 2015-04-08 13:11:28 +01:00
Erik Johnston
24d8134ac1 Fix maria engine to correctly recognize deadlocks 2015-04-08 13:10:54 +01:00
Matthew Hodgson
7f911ef4e3 typo 2015-04-08 10:55:12 +01:00
Erik Johnston
d5e7e6b9b6 Merge branch 'develop' of github.com:matrix-org/synapse into mysql 2015-04-07 18:17:22 +01:00
Erik Johnston
0775c62469 Fix --enable-registration flag to work if you don't give a value 2015-04-07 18:16:23 +01:00
Erik Johnston
38928c6609 Merge branch 'develop' of github.com:matrix-org/synapse into mysql 2015-04-07 18:08:38 +01:00
Erik Johnston
a2a93a4fa7 Make demo script use --enable-registration 2015-04-07 18:08:21 +01:00
Erik Johnston
4fe95094d1 Merge branch 'develop' of github.com:matrix-org/synapse into mysql 2015-04-07 18:05:39 +01:00
Kegan Dougal
ae8ff92e05 Fix a bug which causes a send event level of 0 to not be honoured.
Caused by a bad if check, which incorrectly executes for both 0 and None,
when None was the original intent.
2015-04-07 15:48:20 +01:00
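The pitfall is plain Python truthiness; a self-contained illustration with hypothetical names:

    def send_level_buggy(levels):
        level = levels.get("events_default")
        if not level:          # wrong: 0 and None both take this branch
            level = 50
        return level

    def send_level_fixed(levels):
        level = levels.get("events_default")
        if level is None:      # right: substitute only when truly unset
            level = 50
        return level

    send_level_buggy({"events_default": 0})  # 50 - the reported bug
    send_level_fixed({"events_default": 0})  # 0  - honoured as intended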
Erik Johnston
49d6aa1394 Retry on deadlock 2015-04-07 15:28:37 +01:00
Erik Johnston
0bfa78b39b PEP8 2015-04-07 12:16:05 +01:00
Erik Johnston
6bc9edd8b2 Fix prepare_sqlite3_database's convert_param_style 2015-04-07 12:13:58 +01:00
Erik Johnston
05a35d62b6 Bump database version 2015-04-07 12:10:15 +01:00
Erik Johnston
8574bf62dc Add index to presence table 2015-04-07 12:09:36 +01:00
Erik Johnston
0af5f5efaf Don't use multiple UNIQUE constraints; it will cause deadlocks 2015-04-07 12:08:35 +01:00
Erik Johnston
c8d3f6486d Implement or_ignore flag on inserts 2015-04-07 12:06:22 +01:00
Erik Johnston
304111afd0 Don't use AUTOINCREMENT, use an in memory version 2015-04-07 12:05:36 +01:00
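A minimal sketch of what "an in memory version" can look like (hypothetical; seeded from the table's current maximum at startup):

    import threading

    class IdGenerator(object):
        def __init__(self, current_max):
            self._lock = threading.Lock()
            self._current = current_max  # e.g. SELECT MAX(id) FROM table

        def get_next(self):
            with self._lock:
                self._current += 1
                return self._current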
Erik Johnston
d0e444a648 Explicitly name the __main__ module logger 2015-04-07 12:04:02 +01:00
Matthew Hodgson
65fd446b4d update leo's contribs a bit 2015-04-03 11:48:08 -04:00
Matthew Hodgson
364c7f92b4 potential contributing guide & author list for synapse 2015-04-03 10:09:06 -04:00
David Baker
4eb6d66b45 Add app service auth back in to v2 register 2015-04-02 17:51:19 +01:00
David Baker
6b59650753 Throw sensible errors on not-json when allowing empty body 2015-04-02 17:45:16 +01:00
David Baker
41cd778d66 pep8 2015-04-02 17:06:17 +01:00
David Baker
70a84f17f3 Add shared secret auth into register v2 and switch the script over. 2015-04-02 17:01:29 +01:00
Erik Johnston
779f7b0f44 Fix unicode support 2015-04-02 10:06:22 +01:00
Paul "LeoNerd" Evans
ef1e019840 Appease pep8 2015-04-01 19:17:38 +01:00
Paul "LeoNerd" Evans
5583e29513 Report process open filehandles in metrics 2015-04-01 19:15:23 +01:00
David Baker
c5bf0343e8 Explain how I justified to myself making JsonResource not always send JSON. 2015-04-01 15:13:14 +01:00
Erik Johnston
e24c32e6f3 Fix SQLite support 2015-04-01 15:09:51 +01:00
David Baker
e9c908ebc0 Completely replace fallback auth for C/S V2:
* Now only the auth part goes to fallback, not the whole operation
* Auth fallback is a normal API endpoint, not a static page
* Params like the recaptcha pubkey can just live in the config
Involves a little engineering on JsonResource so its servlets aren't always forced to return JSON. I should document this more; in fact, I'll do that now.
2015-04-01 15:05:30 +01:00
Erik Johnston
9236136f3a Make work in both Maria and SQLite. Fix tests 2015-04-01 14:12:33 +01:00
Kegan Dougal
813e54bd5b Fix more AS sender ID thinkos.
Specifically, the AS's own user ID wasn't being treated as 'exclusive', so
a human could nab it. Also, the HS would needlessly send user queries to the
AS for its own user ID.
2015-04-01 14:05:24 +01:00
Kegsay
80a620a83a Merge pull request #116 from matrix-org/application-services-registration-script
Application services registration changes (PR #116)
2015-04-01 13:51:15 +01:00
Kegan Dougal
9fa8bda099 Merge branch 'develop' into application-services-registration-script 2015-04-01 10:19:17 +01:00
David Baker
f129ee1e18 Make docs a bit more true 2015-03-31 18:25:10 +01:00
Kegan Dougal
09cbff174a Fix thinko whereby events *for the AS specifically* were not passed on.
This was caused by not explicitly checking the service.sender field. This
has now been fixed and a regression test has been added.
2015-03-31 16:44:45 +01:00
David Baker
d18e7779ca Grammar and deduplication 2015-03-31 14:40:02 +01:00
Kegan Dougal
5e88a09a42 Add same user_id char checks as registration. 2015-03-31 14:00:25 +01:00
Kegan Dougal
cf1fa59f4b Use a sender localpart instead of a user ID.
Form the user ID at runtime instead. This gives less room for error in AS
config files since they cannot specify the domain of another HS.
2015-03-31 13:48:03 +01:00
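A sketch of the runtime construction (hypothetical helper; the @localpart:domain shape is the standard Matrix user ID format):

    def as_user_id(sender_localpart, server_name):
        # The HS supplies the domain, so an AS config can only ever pick
        # the localpart, never a user on another HS.
        return "@%s:%s" % (sender_localpart, server_name)

    as_user_id("irc_bridge", "example.org")  # -> '@irc_bridge:example.org'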
Kegan Dougal
3470cb36a8 Pyflakes 2015-03-31 13:03:31 +01:00
Kegan Dougal
c217504949 Edit SQL schema to use string IDs not ints. Use token as ID. Update tests. 2015-03-31 12:07:56 +01:00
Kegan Dougal
b59aa74556 Fix tests and missing returns on deferreds. 2015-03-31 11:35:45 +01:00
Kegan Dougal
d33ae65efc Remove more reg/unreg methods. Read config not database for cache. 2015-03-31 11:00:00 +01:00
David Baker
9f642a93ec pep8 2015-03-31 09:50:44 +01:00
Kegan Dougal
e7887e37a8 Remove appservice REST servlets 2015-03-31 09:32:40 +01:00
Kegan Dougal
af853a4cdb Add AppServiceConfig 2015-03-31 09:22:31 +01:00
David Baker
4891c4ff72 Update CAPTCHA_SETUP (it continues to ignore fallback, but I guess I should fix it so that doesn't need the key in two different places) 2015-03-30 18:27:42 +01:00
David Baker
46183cc69f Add original, unmodified CAPTCHA-SETUP from the webclient repo before modifying (captcha setup is now purely on the HS). 2015-03-30 18:18:19 +01:00
David Baker
59bf16eddc New registration for C/S API v2. Only ReCAPTCHA working currently. 2015-03-30 18:13:10 +01:00
Erik Johnston
9a506a191a Add note in changelog about change in config option names 2015-03-30 17:24:09 +01:00
Matthew Hodgson
8675ea03de actually tell users /how/ to turn on registration 2015-03-30 12:05:38 -04:00
Matthew Hodgson
8366fde82f Turn --disable-registration into --enable-registration, given that registration is now disabled by default. This is backwards incompatible since it removes the old --disable-registration arg, but makes for a much more intuitive arg 2015-03-30 12:01:09 -04:00
Paul "LeoNerd" Evans
3e420aebd8 Revert "Add another @cached wrapper, this time on get_presence_state()"
This reverts commit ff1fa0fbf8.
2015-03-27 16:16:58 +00:00
Paul "LeoNerd" Evans
ff1fa0fbf8 Add another @cached wrapper, this time on get_presence_state() 2015-03-27 15:57:16 +00:00
Erik Johnston
abcd03af02 Merge pull request #115 from matrix-org/allow_registration_for_federation_demo
Allow registration in the HSes federation demo
2015-03-27 10:14:29 +00:00
manuroe
5116946ae9 Allow registration in the HSes federation demo 2015-03-27 11:10:52 +01:00
David Baker
6f4f7e4e22 pep8 2015-03-26 14:12:06 +00:00
David Baker
a32e876ef4 Delete pushers when changing password 2015-03-26 13:40:16 +00:00
Paul "LeoNerd" Evans
a198894bf7 Appease pep8 2015-03-26 11:53:58 +00:00
Kegsay
5b999e206e Merge pull request #106 from matrix-org/application-services-txn-reliability
Application services transaction reliability (PR #106)
2015-03-26 10:30:47 +00:00
Kegan Dougal
32206dde3f Fixes from PR comments 2015-03-26 10:11:52 +00:00
Kegan Dougal
4edcbcee3b Merge branch 'develop' into application-services-txn-reliability
Conflicts:
	synapse/storage/__init__.py
2015-03-26 10:07:59 +00:00
Paul "LeoNerd" Evans
953e40f9dc Implement the main getEvent cache using Cache() instead of a custom application of LruCache; also unify its two-level structure into just one 2015-03-25 19:12:16 +00:00
David Baker
df4c12c762 pep8 blank lines 2015-03-25 19:08:17 +00:00
David Baker
c1a256cc4c Allow multiple pushers for a single app ID & pushkey, honouring the 'append' flag in the API. 2015-03-25 19:06:22 +00:00
Paul "LeoNerd" Evans
f173d40a32 Use FrozenEvent's reject_reason to decide whether to return it; don't include allow_rejected in the main getEvents cache key 2015-03-25 19:06:05 +00:00
Paul "LeoNerd" Evans
1b988b051b Store the rejected reason in (Frozen)Event structs 2015-03-25 19:06:05 +00:00
Paul "LeoNerd" Evans
033a517feb Indirect invalidations of _get_event_cache via a helper method to keep all uses of the cache lexically within one .py file 2015-03-25 19:06:05 +00:00
Paul "LeoNerd" Evans
9ba6487b3f Allow a choice of LRU behaviour for Cache() by using LruCache() or OrderedDict() 2015-03-25 19:05:34 +00:00
Paul "LeoNerd" Evans
d6b3ea75d4 Implement the 'key in dict' test for LruCache() 2015-03-25 19:04:59 +00:00
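Together with the __len__ support added a few weeks earlier (further down this log), the behaviour being built here is roughly the following, sketched in modern Python 3 rather than the Python 2.7 codebase of the time, and as the idea rather than Synapse's actual LruCache:

    from collections import OrderedDict

    class LruCacheSketch:
        """Illustrative bounded cache evicting the least recently used entry."""

        def __init__(self, max_entries=1000):
            self.max_entries = max_entries
            self.cache = OrderedDict()

        def get(self, key, default=None):
            if key in self.cache:
                self.cache.move_to_end(key)   # mark as most recently used
                return self.cache[key]
            return default

        def set(self, key, value):
            self.cache[key] = value
            self.cache.move_to_end(key)
            while len(self.cache) > self.max_entries:
                self.cache.popitem(last=False)  # evict the oldest entry

        def __contains__(self, key):          # enables: key in cache
            return key in self.cache

        def __len__(self):                    # enables: len(cache)
            return len(self.cache)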
Paul "LeoNerd" Evans
7ab9f91a60 Unit-test that Cache() key eviction is ordered 2015-03-25 18:50:43 +00:00
Erik Johnston
0e8f5095c7 Fix unicode database support 2015-03-25 17:15:20 +00:00
David Baker
ce2766d19c Fix tests 2015-03-24 18:56:51 +00:00
David Baker
438a21c87b Don't test exact equality of the list: as long as it has the fields we expect, that's just fine. I added the user_id (as in database pkey) and it broke: no point testing what that comes out as: it's determined by the db. 2015-03-24 18:21:54 +00:00
David Baker
9aa0224cdf unused import 2015-03-24 17:25:59 +00:00
David Baker
c7023f2155 1) Pushers are now associated with an access token
2) Change places where we mean unauthenticated to 401, not 403, in C/S v2: hack so it stays as 403 in v1 because web client relies on it.
2015-03-24 17:24:15 +00:00
Erik Johnston
0ba393924a Escape non printing ascii character 2015-03-24 16:31:52 +00:00
Erik Johnston
f488293d96 Don't reinsert into event_edges 2015-03-24 16:20:26 +00:00
Erik Johnston
1aa44939fc Fix bugs in transactions storage 2015-03-24 16:20:05 +00:00
Erik Johnston
5a447098dd Don't use room hosts table 2015-03-24 16:19:24 +00:00
Erik Johnston
9e98f1022a Don't order by rowid 2015-03-24 16:19:01 +00:00
Erik Johnston
9115421ace Use _simple_upsert 2015-03-24 16:17:39 +00:00
David Baker
d19e79ecc9 Make deleting other access tokens when you change your password actually work 2015-03-24 15:33:48 +00:00
Paul "LeoNerd" Evans
ed008e85a8 Reduce activity timer granularity to avoid too many quick updates (SYN-247) 2015-03-23 17:25:50 +00:00
Erik Johnston
6e7131f02f Remove uses of REPLACE and ON CONFLICT IGNORE to make the SQL more portable. 2015-03-23 15:38:56 +00:00
Erik Johnston
9a7f496298 Sanitize RoomMemberStore 2015-03-23 15:29:04 +00:00
David Baker
78adccfaf4 pep8 / pyflakes 2015-03-23 14:23:51 +00:00
David Baker
d98660a60d Implement password changing (finally) along with a start on making client/server auth more general. 2015-03-23 14:20:28 +00:00
Erik Johnston
d5272b1d2c Use 'update or insert' rather than 'on conflict replace' 2015-03-23 14:02:34 +00:00
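REPLACE and ON CONFLICT clauses are SQLite-isms; the portable 'update or insert' pattern named above boils down to an UPDATE followed by an INSERT when no row matched. A sketch of that idea, not Synapse's actual _simple_upsert, assuming it runs inside a single transaction:

    def upsert_txn(txn, table, keyvalues, values):
        # Try to UPDATE an existing row first.
        update_sql = "UPDATE %s SET %s WHERE %s" % (
            table,
            ", ".join("%s = ?" % k for k in values),
            " AND ".join("%s = ?" % k for k in keyvalues),
        )
        txn.execute(update_sql, list(values.values()) + list(keyvalues.values()))
        if txn.rowcount == 0:
            # No row matched: fall back to INSERT.
            allvalues = dict(values)
            allvalues.update(keyvalues)
            insert_sql = "INSERT INTO %s (%s) VALUES (%s)" % (
                table,
                ", ".join(allvalues),
                ", ".join("?" for _ in allvalues),
            )
            txn.execute(insert_sql, list(allvalues.values()))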
Erik Johnston
278149f533 Sanitize TransactionStore 2015-03-23 13:43:21 +00:00
Paul "LeoNerd" Evans
72d8406409 Put a cache on get_aliases_for_room 2015-03-20 19:21:13 +00:00
Paul "LeoNerd" Evans
a63b4f7101 Remember the 'last seen' time for a given user/IP/device combination and only bother INSERTing another if it's stale 2015-03-20 18:25:49 +00:00
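The shape of that staleness check, as a sketch with hypothetical names:

    LAST_SEEN_GRANULARITY = 120 * 1000  # ms a row may age before being rewritten

    def maybe_record_last_seen(store, user_id, ip, device_id, now_ms):
        # 'client_ip_last_seen' and 'insert_client_ip' are hypothetical names:
        # keep an in-memory map of the last write per (user, IP, device) and
        # skip the INSERT entirely if it happened recently enough.
        key = (user_id, ip, device_id)
        last = store.client_ip_last_seen.get(key)
        if last is not None and now_ms - last < LAST_SEEN_GRANULARITY:
            return  # still fresh; avoid a pointless DB write
        store.client_ip_last_seen[key] = now_ms
        store.insert_client_ip(user_id, ip, device_id, now_ms)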
Paul "LeoNerd" Evans
0f86312c4c Pull out the cache logic from the @cached wrapper into its own class we can reuse 2015-03-20 18:25:42 +00:00
Paul "LeoNerd" Evans
b1022ed8b5 func(*EXPR) is valid Python syntax, really... 2015-03-20 17:47:45 +00:00
Erik Johnston
f6583796fe Merge branch 'develop' of github.com:matrix-org/synapse into mysql 2015-03-20 16:31:48 +00:00
Erik Johnston
4848fdbf59 Merge pull request #113 from matrix-org/store_rearrangement
Store rearrangement
2015-03-20 16:23:01 +00:00
Erik Johnston
80cd08c190 PEP8 2015-03-20 16:03:25 +00:00
Erik Johnston
9517f4da4d Merge branch 'develop' of github.com:matrix-org/synapse into store_rearrangement 2015-03-20 16:02:47 +00:00
Erik Johnston
dc0c989ef4 Give sensible names for '_simple_...' transactions 2015-03-20 15:59:18 +00:00
Paul "LeoNerd" Evans
ceb61daa70 Add the tiniest of tiny one-element caches to get_room_events_max_id() as it's read every time someone hits eventstream 2015-03-20 15:44:06 +00:00
Erik Johnston
fce0114005 Start removing Tables 2015-03-20 15:05:44 +00:00
Erik Johnston
7e282a53a5 Tidy up _simple_... methods 2015-03-20 15:05:10 +00:00
Paul "LeoNerd" Evans
91cb46191d Allow @cached-wrapped functions to have more or fewer than 1 argument; assert on the total count of them though 2015-03-20 14:59:45 +00:00
Erik Johnston
87db64b839 Rearrange storage modules 2015-03-20 14:11:38 +00:00
Erik Johnston
cb8162d3d1 Rearrange storage modules 2015-03-20 13:52:56 +00:00
Erik Johnston
d288d273e1 Generate transaction id in code 2015-03-20 10:57:44 +00:00
Erik Johnston
d4f50f3ae5 decode_result takes an iterable 2015-03-20 10:57:26 +00:00
Erik Johnston
455579ca90 Make database selection configurable 2015-03-20 10:55:55 +00:00
Erik Johnston
0d0610870d Fix up schemas some more 2015-03-20 10:55:31 +00:00
Erik Johnston
532ebc4a82 Merge pull request #112 from matrix-org/hotfixes-v0.8.1-r2
Hotfixes v0.8.1 r2
2015-03-19 17:51:37 +00:00
Erik Johnston
56f2d31676 Bump version 2015-03-19 17:48:33 +00:00
Erik Johnston
c178e4e6ca Add missing servlet to list 2015-03-19 17:48:21 +00:00
Erik Johnston
d7a0496f3e Convert storage layer to be mysql compatible 2015-03-19 15:59:48 +00:00
Erik Johnston
58ed393235 Remove redundant key 2015-03-19 15:12:05 +00:00
Erik Johnston
fae059cc18 Fix up schemas to work with mariadb 2015-03-19 13:42:39 +00:00
Erik Johnston
b26d85c30f Merge branch 'hotfixes-0.8.1a' of github.com:matrix-org/synapse 2015-03-19 11:26:29 +00:00
Erik Johnston
0dcb145c7e Bump version 2015-03-19 11:26:03 +00:00
David Baker
64cf1483e5 D'oh - setup.py used the dict directly: make it use the wrapper function. 2015-03-19 11:21:34 +00:00
Erik Johnston
d028207a6e Merge branch 'release-v0.8.1' of github.com:matrix-org/synapse 2015-03-19 10:43:31 +00:00
Erik Johnston
0a55a2b692 Update CHANGES 2015-03-18 11:48:47 +00:00
Erik Johnston
6cc046302f Bump version 2015-03-18 11:41:00 +00:00
Erik Johnston
ed4d44d833 Merge pull request #109 from matrix-org/default_registration
Disable registration by default. Add script to register new users.
2015-03-18 11:38:52 +00:00
Erik Johnston
f88db7ac0b Factor out user id validation checks 2015-03-18 11:34:18 +00:00
Erik Johnston
57976f646f Do more validation of incoming request 2015-03-18 11:30:04 +00:00
Erik Johnston
bb24609158 Clean out event_forward_extremities table when the server rejoins the room 2015-03-18 11:19:47 +00:00
Erik Johnston
89036579ed Update schema to work with mariadb 2015-03-18 11:18:49 +00:00
Paul "LeoNerd" Evans
93978c5e2b @cached() annotate get_user_by_token() - achieves a minor DB performance improvement 2015-03-17 17:24:51 +00:00
Paul "LeoNerd" Evans
1489521ee5 Be polite and ensure we use @functools.wraps() when creating a function decorator 2015-03-17 17:19:22 +00:00
David Baker
6d33f97703 pep8 2015-03-17 11:53:55 +00:00
David Baker
7564dac8cb Wire up the webclient option
It existed but was hardcoded to True.
Give it an underscore for consistency.
Also don't pull in syweb unless we're actually using the web client.
2015-03-17 12:45:37 +01:00
Paul "LeoNerd" Evans
3f7a31d366 Add a DistributionMetric to HTTP request/response processing time in the server 2015-03-16 18:31:29 +00:00
Paul "LeoNerd" Evans
be170b1426 Add a metric for the scheduling latency of SQL queries 2015-03-16 17:21:59 +00:00
Erik Johnston
cd2539ab2a Merge pull request #110 from matrix-org/fix_ban
Fix ban
2015-03-16 15:36:52 +00:00
Kegan Dougal
f0d6f724a2 Set the service ID as soon as it is known. 2015-03-16 15:24:32 +00:00
Erik Johnston
6df319b6f0 Fix tests 2015-03-16 15:15:14 +00:00
Erik Johnston
f1d2b94e0b Copy dict of context.current_state before changing it. 2015-03-16 15:13:05 +00:00
Erik Johnston
857810d2dd Revert incorrect changes to where we send events 2015-03-16 15:12:47 +00:00
Erik Johnston
ac8eb0f319 Merge pull request #111 from matrix-org/send_event_dont_wait_on_notifier
Don't block waiting on waking up all the listeners when sending an event
2015-03-16 14:13:34 +00:00
Kegan Dougal
d04fa1f712 Implement ServiceQueuer with tests. 2015-03-16 14:03:16 +00:00
Erik Johnston
c2c9471cba Don't block waiting on waking up all the listeners when sending an event. 2015-03-16 13:16:37 +00:00
Kegan Dougal
6279285b2a Replace EventGrouper for ServiceQueuer to move to push-based txns. Fix tests and add stub tests for ServiceQueuer. 2015-03-16 13:15:40 +00:00
Erik Johnston
8bad40701b Comment. 2015-03-16 13:13:07 +00:00
Erik Johnston
250e143084 Use 403 instead of 400 2015-03-16 13:11:42 +00:00
Erik Johnston
b2e6ee5b43 Remove concept of context.auth_events, instead use context.current_state 2015-03-16 13:06:23 +00:00
Kegan Dougal
c9c444f562 Wrap polling/retry blocks in try/excepts to avoid sending to other ASes breaking permanently should an error occur. 2015-03-16 10:38:02 +00:00
Kegan Dougal
835e01fc70 Minor PR comment tweaks. 2015-03-16 10:16:59 +00:00
Kegan Dougal
f9232c7917 Merge branch 'develop' into application-services-txn-reliability
Conflicts:
	synapse/storage/appservice.py
2015-03-16 10:09:15 +00:00
Erik Johnston
e7ce5d8b06 Fix test 2015-03-16 00:30:59 +00:00
Erik Johnston
758d114cbc Send all membership events to the remote homeserver 2015-03-16 00:27:59 +00:00
Erik Johnston
ea8590cf66 Make context.auth_events grab auth events from current state. Otherwise auth is wrong. 2015-03-16 00:18:08 +00:00
Erik Johnston
ab8229479b Respect ban membership 2015-03-16 00:17:25 +00:00
Matthew Hodgson
b677ff6692 add ToC and fix typo 2015-03-14 00:22:41 +00:00
Matthew Hodgson
c8032aec17 actually uphold the bind_host parameter. in theory should make ipv6 binds work like bind_host: 'fe80::1%lo0' 2015-03-14 00:12:20 +00:00
Matthew Hodgson
256fe08963 uncommitted WIP from MWC 2015-03-14 00:09:21 +00:00
Paul Evans
e731d30d90 Merge pull request #108 from matrix-org/metrics
Metrics
2015-03-13 17:31:10 +00:00
Erik Johnston
a1abee013c Add note about disabling registration by default 2015-03-13 17:06:21 +00:00
Erik Johnston
98a3825614 Allow enabling of registration with --disable-registration false 2015-03-13 16:49:18 +00:00
Erik Johnston
7393c5ce4c Rename register script to 'register_new_matrix_user' 2015-03-13 15:33:21 +00:00
Erik Johnston
598c47a108 Change default server url to match default ports 2015-03-13 15:29:38 +00:00
Erik Johnston
9266cb0a22 PEP8 2015-03-13 15:26:00 +00:00
Erik Johnston
bcfce93ccd Add 'register_new_user' script 2015-03-13 15:25:30 +00:00
Erik Johnston
dea236e4fa Add missing commas 2015-03-13 15:24:03 +00:00
Erik Johnston
69135f59aa Implement registering with shared secret. 2015-03-13 15:23:37 +00:00
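The gist of the shared-secret scheme: the registration script and the HS each compute an HMAC from the secret in the server config, and the HS only creates the account when the MACs match. A sketch of the MAC step; the exact bytes being MAC'd are an assumption here, not a wire-format description:

    import hmac
    from hashlib import sha1

    def registration_mac(shared_secret, localpart):
        # HMAC-SHA1 keyed with the shared secret, over the requested localpart.
        mac = hmac.new(
            key=shared_secret.encode("utf8"),
            msg=localpart.encode("utf8"),
            digestmod=sha1,
        )
        return mac.hexdigest()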
Erik Johnston
58367a9da2 Disable registration by default 2015-03-13 12:59:45 +00:00
Erik Johnston
58247c8b4b Also bump dependency link version 2015-03-13 11:39:57 +00:00
Matthew Hodgson
f55bd3f94b bump dep on syweb 0.6.5 2015-03-12 18:56:53 +00:00
Paul "LeoNerd" Evans
e90002ca1d Merge remote-tracking branch 'origin/develop' into metrics 2015-03-12 16:55:25 +00:00
David Baker
bbb010a30f More sacrifices to the pep8 gods. 2015-03-12 16:53:12 +00:00
Paul "LeoNerd" Evans
05a056a409 Appease pyflakes 2015-03-12 16:45:05 +00:00
Paul "LeoNerd" Evans
0eb7e6b9a8 Delete unused import of NOT_READY_YET 2015-03-12 16:39:52 +00:00
Paul "LeoNerd" Evans
128cf2daf7 Appease pep8 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
b98b4c135d Option to serve metrics from their own localhost-only TCP port instead of muxed on the main listener 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
a2cdd11d4a Fold the slightly-odd bind_port/secure_port/etc.. logic into SynapseHomeServer.start_listening() 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
e0214a263b Build MetricsResource as a specific HomeServer dependency 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
e75fa8bbbf Bugfix to sql_txn_timer increment - add only the per-TXN duration, not the total time ever spent since boot 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
c782e893ec Neater metrics from TransactionQueue 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
89ac1fa8ba Add a counter to track total number of events served by the notifier 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
2e4f0b2bd7 Replace the @metrics.counted annotations in federation with specifically-written counters and distributions 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
c1cdd7954d Add an .inc_by() method to CounterMetric; implement DistributionMetric a neater way 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
63cb7ece62 Rename the timer metrics exported by synapse.storage to append _time, so the meaning of ':total' is clearer 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
493e3fa0ca Don't forbid '_' in metric basenames any more, to allow things like foo_time 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
f1fbe3e09f Rename TimerMetric to DistributionMetric; as it could count more than just time 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
642f725fd7 Pretend the 'getEvent' cache is just another cache in the set of all the others for metrics 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
cbc0406be8 Export CacheMetric as hits+total, rather than hits+misses, as it's easier to derive hit ratio from that 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
1748605c5d Count incoming HTTP requests per servlet that responds 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
4d661ec0f3 Remember to emit final linefeed from /metrics page, or Prometheus gets upset 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
0e847540c3 Prometheus needs "escaped" label values 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
22b37b75db Kill unused CounterMetric.fetch() method 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
b0cf867319 Use _ instead of . as a metric namespacing separator, for Prometheus 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
0b96bb793e Have all @metrics.counted use a single metric name vectored on the method name, rather than a brand new scalar counter per counted method 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
b3a0179d64 Bugfix to rendering output of vectored TimerMetrics 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
f9478e475b Rename Metrics' "keys" to "labels" 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
399689dcc7 Provide some process resource usage metrics 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
fa319a5786 Add TimerMetrics to shadow the PerformanceCounters in synapse.storage; with the view to eventually replacing them entirely 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
6d146e15df Put some gauge metrics on the number of notifier listeners, and notified-on objects (users, rooms, appservices) 2015-03-12 16:24:51 +00:00
Paul "LeoNerd" Evans
25187ab674 Collect per-SQL-verb timer stats on query execution time 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
f52acf3b12 Neater register_* methods on overall Metrics container 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
a99d6edc05 Neater implementation of metric render methods by pulling out 'render' as a base method that calls self.render_item 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
72625f2f4d Initial hack at a TimerMetric; for storing counts + duration accumulators 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
e1a7e3564f Delete a couple of TODO markers of monitoring stats now done 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
094803cf82 Put vector gauges on transaction queue pending PDU and EDU dicts 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
e9c4b0d178 Ensure that /_synapse/metrics response is UTF-8 encoded 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
23ab0c68c2 Implement vector CallbackMetrics 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
849300bc73 Neater introspection methods on BaseMetric so that subclasses don't need to touch self.keys directly 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
8664599af7 Rename CacheCounterMetric to just CacheMetric; add a CallbackMetric component to give the size of the cache 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
e02cc249da Ensure that exceptions while rendering individual metrics don't stop others from being rendered anyway - especially useful for CallbackMetric 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
59c448f074 Add a scalar gauge metric on the size of the presence user cachemap 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
d8caa5454d Initial attempt at a scalar callback-based metric to give instantaneous snapshot gauges 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
b0cdf097f4 Sprinkle some CacheCounterMetrics around the synapse.storage layer 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
ce8b5769f7 Create the concept of a cachecounter metric; generating two counters specific to caches 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
7d72e44eb9 Add vector counters to HTTP clients and servers; count the requests by method and responses by method and response code 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
c53ec53d80 Pull out all uses of the underlying HTTP user agent .request() method into a single wrapper function, to make adding metrics easier 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
9470412316 Initial attempt at sprinkling some @metrics.counted decorations around the federation code 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
a594087f06 Have the MetricsResource actually render metric counters 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
74bc42cfdd An initial implementation of a 'metrics' instance, similar to a 'logger' for keeping counter stats on method calls 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
120b689284 Delete pointless (and unreachable) __init__ method from FederationClient 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
e7420a3bef Initial tiny attempt at (vectorable) counter metrics 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
e07fc62833 A trivial 'hello world'-style resource on /_synapse/metrics, with optional commandline flag 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
5b6e11d560 Commandline option to enable metrics system 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
211c14c391 No need to explicitly pass 'web_client' in to create_resource_tree as it can be found via config 2015-03-12 16:24:50 +00:00
Paul "LeoNerd" Evans
ad5701f50f Expose 'config' as a real HomeServer dependency key 2015-03-12 16:24:50 +00:00
David Baker
c92fdf88a3 Log the matching push rule. 2015-03-11 22:17:31 +00:00
Paul Evans
d33a3b91c3 Merge pull request #107 from matrix-org/add_desc_to_storage_execute
Add desc to storage execute
2015-03-11 17:55:31 +00:00
Paul "LeoNerd" Evans
a7a28f85ae Appease pep8 2015-03-11 17:32:43 +00:00
Paul "LeoNerd" Evans
59a5f012cc Also give _execute() a description 2015-03-11 17:19:17 +00:00
Paul "LeoNerd" Evans
099e4b88d8 Add a description to storage layer's _execute_and_decode() 2015-03-11 17:08:57 +00:00
David Baker
cdb2e045ee Again, underscore, not hyphen 2015-03-11 14:22:35 +00:00
David Baker
465354ffde 'false' is not False 2015-03-11 11:24:50 +00:00
David Baker
83b1e7fb3c PEP8 blank lines 2015-03-11 10:01:17 +00:00
David Baker
04f8478aaa Add the master push rule for the break-my-push button. Allow server default rules to be disabled by default. 2015-03-10 17:26:25 +00:00
David Baker
8916acbc13 These aren't defined for redacted events so don't crash 2015-03-10 11:21:37 +00:00
Erik Johnston
abaf47bbb6 Fix bug in logging. 2015-03-10 10:28:29 +00:00
Erik Johnston
045afd6b61 in_thread takes no arguments 2015-03-10 10:19:03 +00:00
Erik Johnston
98b867f7b7 Fix bug in logging. 2015-03-10 10:16:09 +00:00
Kegan Dougal
db1fbc6c6f Fix remaining scheduler bugs. Add more informative logging. 2015-03-10 10:04:20 +00:00
Erik Johnston
e84fe3599b Merge pull request #105 from matrix-org/erikj-perf
Add a Twisted Service to synapse.app.homeserver
2015-03-10 10:02:26 +00:00
Erik Johnston
c37eceeb9e Split out the 'run' from 'setup' 2015-03-10 09:58:33 +00:00
Erik Johnston
b8a6692657 Add documentation. When starting via twistd respect soft_file_limit config option. 2015-03-10 09:39:42 +00:00
Kegan Dougal
7e0bba555c Remove unused import 2015-03-09 17:48:37 +00:00
Kegan Dougal
04c9751f24 Bug fixes whilst putting it all together 2015-03-09 17:45:41 +00:00
Erik Johnston
019422ebba Merge pull request #101 from matrix-org/neaten-federation-servlets
Neaten federation servlets
2015-03-09 17:39:06 +00:00
Kegan Dougal
b98cd03193 Use event IDs instead of dumping event content in the txns table. 2015-03-09 17:25:20 +00:00
Erik Johnston
9fccb0df08 Merge pull request #104 from matrix-org/get_joined_rooms_for_user
Get joined rooms for user
2015-03-09 17:22:29 +00:00
Kegan Dougal
21fd84dcb8 Use seconds; start gluing in the AS scheduler into the AS handler. 2015-03-09 17:01:19 +00:00
Erik Johnston
6d74e46621 Fix tests 2015-03-09 17:01:11 +00:00
Erik Johnston
8e28db5cc9 Change room handlers get_rooms_for_user to get_joined_rooms_for_user. This uses a storage API that is cached. 2015-03-09 16:43:09 +00:00
Kegan Dougal
0a60bbf4fa Finish appservice txn storage impl and tests. 2015-03-09 15:53:03 +00:00
Erik Johnston
d5174065af Merge branch 'release-v0.8.0' of github.com:matrix-org/synapse 2015-03-09 14:25:06 +00:00
Kegan Dougal
1ead1caa18 Implement create_appservice_txn with tests. 2015-03-09 13:54:20 +00:00
Erik Johnston
f31e65ca8b Merge branch 'develop' of github.com:matrix-org/synapse into erikj-perf 2015-03-09 13:29:41 +00:00
Kegan Dougal
1c2dcf762a Partially implement txn store methods with tests. 2015-03-09 13:10:31 +00:00
David Baker
1df3ccf7ee D'oh: underscore, not hyphen 2015-03-09 12:39:56 +00:00
David Baker
118c883429 Call notifications should be override rules, else they'll get clobbered by sender/room rules. 2015-03-06 19:41:36 +00:00
Kegan Dougal
406d32f8b5 Start implementing ApplicationServiceTransactionStore 2015-03-06 17:35:14 +00:00
Kegan Dougal
34ce2ca62f Merge branch 'develop' into application-services-txn-reliability 2015-03-06 17:28:49 +00:00
Kegan Dougal
4a6afa6abf Assign the AS ID from the database; replace old placeholder txn id. 2015-03-06 17:27:55 +00:00
Kegan Dougal
01c099d9ef Add appservice txns sql schema 2015-03-06 17:16:47 +00:00
Kegan Dougal
64345b7559 Upper bound the backoff. 2015-03-06 16:41:19 +00:00
Erik Johnston
5d43eaed61 Merge branch 'develop' into release-v0.8.0 2015-03-06 16:25:19 +00:00
Erik Johnston
9ccccd4874 When setting display name, more gracefully handle failures to update room state. 2015-03-06 16:24:05 +00:00
Kegan Dougal
10766f1e93 Update UTs 2015-03-06 16:17:01 +00:00
Kegan Dougal
2602ddc379 Apply clarity and docstrings 2015-03-06 16:16:14 +00:00
Kegan Dougal
0354659f9d Finish synapse.appservice.scheduler implementation.
With tests to assert behaviour. Not hooked up yet. Stub datastore methods
not implemented yet.
2015-03-06 16:09:05 +00:00
David Baker
be9dafcd37 Dial down logging for failed pushers 2015-03-06 15:32:38 +00:00
Kegan Dougal
7d3491c741 Add some loggers 2015-03-06 15:17:50 +00:00
David Baker
a2c6f25190 Update CHANGES to reflect no more fallback rule 2015-03-06 15:14:48 +00:00
David Baker
96eda876a4 Specify when we don't want to highlight 2015-03-06 15:12:37 +00:00
Kegan Dougal
f260cb72cd Flesh out more stub functions. 2015-03-06 15:12:24 +00:00
David Baker
e7d7152c3c Remove the fallback rule - we probably don't want to be notifying for everything even if we don't know what it is. 2015-03-06 15:03:34 +00:00
Erik Johnston
b67765dccf Update CHANGES 2015-03-06 15:00:33 +00:00
Erik Johnston
2763587acd Update UPGRADES.rst 2015-03-06 15:00:33 +00:00
Kegan Dougal
141ec04d19 Add stub ApplicationServiceTransactionStore. Bootstrap Recoverers. Fill in stub Transaction functions. 2015-03-06 14:53:35 +00:00
David Baker
5ecc768970 Add attribute so push gateways can tell if a member event is about the user in question 2015-03-06 14:41:50 +00:00
Kegan Dougal
0fbfe1b08a Add more tests; fix bugs. 2015-03-06 14:36:52 +00:00
Erik Johnston
369449827d Bump version 2015-03-06 14:24:53 +00:00
Erik Johnston
c54773473f Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-03-06 14:23:41 +00:00
Erik Johnston
b102a87348 Merge pull request #96 from matrix-org/pushrules2
Evolution of push rules
2015-03-06 14:20:04 +00:00
David Baker
cf66ddc1b4 Schema change as delta in v14 2015-03-06 14:11:49 +00:00
David Baker
c06b45129c Add more server default rules so we have default rules for whether you get notifs for invites / random member events 2015-03-06 11:50:51 +00:00
Kegan Dougal
192e228a98 Start adding some tests 2015-03-06 11:50:27 +00:00
Erik Johnston
b1491dfd7c Merge pull request #103 from matrix-org/no_tls_private_key
Don't look for a TLS private key if we have set --no-tls
2015-03-06 11:45:22 +00:00
Erik Johnston
e49d6b1568 Unused import 2015-03-06 11:37:24 +00:00
David Baker
657a0d2568 Comment typo 2015-03-06 11:34:30 +00:00
Erik Johnston
3ce8540484 Don't look for a TLS private key if we have set --no-tls 2015-03-06 11:34:06 +00:00
Erik Johnston
e780492ecf Merge pull request #102 from matrix-org/randomize_stream_timeout
Add some randomness to the user specified timeout on event streams to mi...
2015-03-06 10:28:19 +00:00
David Baker
1487bba226 Suppress notices should trump content/room/sender rules. 2015-03-06 10:27:32 +00:00
David Baker
83d31144eb Add the highlight tweak where messages should be highlighted a different colour in appropriate clients. 2015-03-06 10:26:08 +00:00
Kegan Dougal
d516d68b29 Rejig structure given the appservice_handler already filters the correct ASes to use. 2015-03-06 10:25:50 +00:00
Erik Johnston
130df8fb01 Add some randomness to the user-specified timeout on event streams to mitigate against thundering herd problems 2015-03-06 10:25:36 +00:00
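The jitter itself can be a one-liner along these lines (constants illustrative):

    import random

    def randomize_timeout(timeout_ms, max_timeout_ms=120 * 1000):
        # Clamp the client-supplied /events timeout, then perturb it so
        # clients that connected at the same moment don't all expire at once.
        timeout_ms = min(timeout_ms, max_timeout_ms)
        return int(random.uniform(timeout_ms * 0.9, timeout_ms * 1.1))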
Paul "LeoNerd" Evans
d79d91a4a7 Appease pep8 2015-03-05 20:55:40 +00:00
Paul "LeoNerd" Evans
5eab2549ab Append a $ on PATH at registration time, meaning each PATH attribute doesn't need it 2015-03-05 20:36:05 +00:00
Paul "LeoNerd" Evans
7644cb79b2 Slightly neater(?) arrangement of authentication wrapper for HTTP servlet methods 2015-03-05 20:33:16 +00:00
Paul "LeoNerd" Evans
ba8ac996f9 Remove the dead 'rate_limit_origin' method from TransportLayerServer 2015-03-05 19:43:17 +00:00
Paul "LeoNerd" Evans
a901ed16b5 Move federation API responding code out of weird mix of lambdas into Servlet-style methods on instances 2015-03-05 19:10:57 +00:00
Kegan Dougal
0c838f9f5e Minor tweaks 2015-03-05 17:45:52 +00:00
Kegan Dougal
773cb3b688 Add stub architecture for txn reliability. 2015-03-05 17:35:07 +00:00
Erik Johnston
5b5c7a28d6 Log error message when we fail to fetch remote server keys 2015-03-05 17:09:13 +00:00
Erik Johnston
12bcf3d179 Merge pull request #100 from matrix-org/missing_pdu_compat
Handle if get_missing_pdu returns 400 or not all events.
2015-03-05 16:42:15 +00:00
Erik Johnston
9708f49abf Docs 2015-03-05 16:35:16 +00:00
Erik Johnston
96fee64421 Remove unnecessary check 2015-03-05 16:31:47 +00:00
Erik Johnston
39aa968a76 Respect min_depth argument 2015-03-05 16:31:32 +00:00
Erik Johnston
6dfd8c73fc Docs. 2015-03-05 16:31:13 +00:00
Kegan Dougal
e319071191 Add stub scheduler module for txn reliability 2015-03-05 16:30:33 +00:00
Paul "LeoNerd" Evans
9d9d39536b Slightly reduce the insane amounts of indentation in main http server response path, by 'continue'ing around a non-match or falling through 2015-03-05 16:24:13 +00:00
Erik Johnston
ae702d161a Handle if get_missing_pdu returns 400 or not all events. 2015-03-05 16:08:02 +00:00
Kegan Dougal
be09c23ff0 Add txn_id kwarg to push methods 2015-03-05 15:40:07 +00:00
Paul "LeoNerd" Evans
dc4b774f1e Rename rooms_to_listeners to room_to_listeners, for consistency with user_ and appservice_* 2015-03-05 14:30:20 +00:00
Paul "LeoNerd" Evans
027fd1242c Give LruCache a __len__, so that len(cache) works 2015-03-04 17:32:28 +00:00
David Baker
590b544f67 Add default rule to suppress notices. 2015-03-04 15:29:02 +00:00
David Baker
ed72fc3a50 Merge branch 'develop' into pushrules2
Conflicts:
	synapse/storage/schema/pusher.sql
2015-03-04 15:24:21 +00:00
Erik Johnston
1af1c45dc0 Merge pull request #99 from matrix-org/schema_versioning
Schema versioning
2015-03-04 15:18:30 +00:00
Erik Johnston
d56c01fff4 Note that we don't specify execution order 2015-03-04 15:10:05 +00:00
Erik Johnston
17d319a20d s/schema_deltas/applied_schema_deltas/ 2015-03-04 15:06:22 +00:00
David Baker
92b3dc3219 Merge branch 'develop' into pushrules2 2015-03-04 14:56:41 +00:00
Erik Johnston
5681264faa s/%r/%s/ 2015-03-04 14:21:53 +00:00
Erik Johnston
f701197227 Add example directory structures in doc 2015-03-04 14:20:14 +00:00
David Baker
2a45f3d448 Use if not results rather than len, as per feedback. 2015-03-04 14:17:59 +00:00
Erik Johnston
16dd87d848 Don't assume db conn is a Context Manager.
Twisted adbapi wrapped connections aren't context managers.
2015-03-04 14:03:41 +00:00
Erik Johnston
5eefd1f618 Add unique constraint on schema_version.lock schema. Use conflict clause in sql. 2015-03-04 13:52:18 +00:00
Erik Johnston
b4c38738f4 Change to use logger in db upgrade script 2015-03-04 13:43:35 +00:00
Erik Johnston
640e53935d Use context manager with db conn to correctly commit and rollback 2015-03-04 13:43:17 +00:00
Erik Johnston
8c8354e85a Actually add full_schemas dir 2015-03-04 13:34:38 +00:00
Erik Johnston
c3530c3fb3 More docs. Rename 'schema/current' to 'schema/full_schemas' 2015-03-04 13:34:11 +00:00
Erik Johnston
811355ccd0 Add some docs and remove unused variables 2015-03-04 13:11:01 +00:00
Erik Johnston
82b34e813d SYN-67: Finish up implementing new database schema management 2015-03-04 12:04:19 +00:00
Matthew Hodgson
84a4367657 WIP vertobridge AS 2015-03-04 01:28:04 +01:00
Erik Johnston
abbee6b29b Merge pull request #98 from matrix-org/hotfixes-v0.7.1-r4
Hotfixes v0.7.1 r4
2015-03-03 14:50:36 +00:00
Erik Johnston
527e0c43a5 Bump version 2015-03-03 14:49:34 +00:00
Erik Johnston
ede89ae3b4 Also bump version of downloaded syweb 2015-03-03 14:49:19 +00:00
Erik Johnston
a313e97873 Merge pull request #97 from matrix-org/hotfixes-v0.7.1-r3
Bump syweb dependency
2015-03-03 13:34:06 +00:00
Erik Johnston
da877aad15 Bump syweb dependency 2015-03-03 13:31:50 +00:00
Erik Johnston
8d33adfbbb SYN-67: Begin changing the way we handle schema versioning 2015-03-02 18:23:55 +00:00
David Baker
6fab7bd2c1 s/user_name/user/ as per mjark's comment 2015-03-02 18:17:19 +00:00
David Baker
09f9e8493c Oops, missed a replacement. 2015-03-02 17:37:22 +00:00
David Baker
3c8bd7809c Fix upgrade instructions 2015-03-02 16:40:38 +00:00
Erik Johnston
9f03553f48 Add missing comma 2015-03-02 16:38:40 +00:00
Erik Johnston
b41dc68773 We purposefully don't have a version 14 delta script. 2015-03-02 16:36:19 +00:00
David Baker
20436cdf75 Blank lines 2015-03-02 15:58:12 +00:00
Kegan Dougal
2de5b14fe0 Fix bug which prevented the HS pushing events to the AS due to FrozenEvents 2015-03-02 15:36:37 +00:00
Erik Johnston
8486910b64 Bump webclient version 2015-03-02 14:57:37 +00:00
Kegsay
8ad024ea80 Merge pull request #93 from matrix-org/application-services-exclusive
Application services exclusive flag support
2015-03-02 14:56:32 +00:00
Erik Johnston
b2d2118476 Merge pull request #88 from matrix-org/batched_get_pdu
Batched get pdu
2015-03-02 14:54:27 +00:00
Kegan Dougal
fb7b6c4681 Wording tweaks 2015-03-02 14:52:31 +00:00
Erik Johnston
0a036944bd Merge branch 'develop' of github.com:matrix-org/synapse into batched_get_pdu 2015-03-02 13:53:30 +00:00
Erik Johnston
3fce185c77 Merge pull request #83 from matrix-org/nofile_limit_config
Add config option to set the soft fd limit on start
2015-03-02 13:52:16 +00:00
Erik Johnston
e4f301e7a0 Merge pull request #94 from matrix-org/federation_rate_limit
Federation rate limit
2015-03-02 13:44:43 +00:00
Erik Johnston
4195e55ccc Merge branch 'develop' of github.com:matrix-org/synapse into federation_rate_limit 2015-03-02 13:39:22 +00:00
Kegan Dougal
c3c01641d2 Run deltas and bump user_version in upgrade script 2015-03-02 13:38:57 +00:00
Erik Johnston
210d3c5d72 Merge pull request #95 from matrix-org/serialize_transaction_processing
Process transactions serially.
2015-03-02 13:33:05 +00:00
Erik Johnston
3077cb2915 Use contextlib.contextmanager instead of a custom class 2015-03-02 13:32:44 +00:00
David Baker
769f8b58e8 Rename the room-with-two-people rule to be more compatible if we have actual one to one rooms. 2015-03-02 13:28:24 +00:00
Kegsay
33f93d389e Merge pull request #92 from matrix-org/application-services-event-stream
Application services event stream support
2015-03-02 12:02:48 +00:00
Erik Johnston
29481690c5 If we're yielding don't add errback 2015-03-02 11:50:43 +00:00
Kegan Dougal
3f6b36d96e Add upgrade script 2015-03-02 11:39:58 +00:00
Erik Johnston
23d9bd1d74 Process transactions serially.
Since the events received in a transaction are ordered, later events
might depend on earlier events and so we shouldn't blindly process them
in parallel.
2015-03-02 11:39:57 +00:00
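Stripped of the Twisted machinery, the ordering requirement amounts to per-origin serialisation, roughly like this sketch (names hypothetical):

    import threading
    from collections import defaultdict

    _origin_locks = defaultdict(threading.Lock)

    def process_transaction(origin, pdus, handle_event):
        # A transaction's PDUs arrive ordered and later ones may depend on
        # earlier ones, so handle them strictly in sequence per origin.
        with _origin_locks[origin]:
            for pdu in pdus:
                handle_event(pdu)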
Erik Johnston
9d9b230501 Make the federation server ratelimiting configurable. 2015-03-02 11:33:45 +00:00
Kegan Dougal
cb97ea3ec2 PEP8 2015-03-02 11:23:46 +00:00
Kegan Dougal
377ae369c1 Wrap all of get_app_service_rooms in a txn. 2015-03-02 11:20:51 +00:00
Kegan Dougal
b216b36892 JOIN state_events rather than parsing unrecognized_keys to pull out member state_keys 2015-03-02 10:41:35 +00:00
Kegan Dougal
3d73383d18 Modify _simple_select_list to allow an empty WHERE clause. Use it for get_all_rooms and get_all_users. 2015-03-02 10:16:24 +00:00
Kegan Dougal
ebc4830666 PR tweaks: set earlier on and use 'as json' for compat 2015-03-02 09:53:00 +00:00
David Baker
2a6dedd7cc It's set_tweak now, not set_sound 2015-02-27 18:38:56 +00:00
Erik Johnston
0554d07082 Move federation rate limiting out of transport layer 2015-02-27 15:41:52 +00:00
Erik Johnston
9dc9118e55 Document FederationRateLimiter 2015-02-27 15:16:47 +00:00
Kegan Dougal
58ff066064 Implement exclusive namespace checks. 2015-02-27 13:51:41 +00:00
Kegan Dougal
de190e49d5 Add more unit tests for exclusive namespaces. 2015-02-27 11:51:06 +00:00
Kegan Dougal
127efeeb68 Update unit tests to use new format. 2015-02-27 11:10:48 +00:00
Kegan Dougal
40c9896705 Add functions to return whether an AS has exclusively claimed a matching namespace. 2015-02-27 11:03:56 +00:00
Kegan Dougal
16b90764ad Convert expected format for AS regex to include exclusivity.
Previously you just specified the regex as a string, now it expects a JSON
object with a 'regex' key and an 'exclusive' boolean, as per spec.
2015-02-27 10:44:32 +00:00
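Concretely, where a namespace entry used to be a bare string such as "@irc_.*", each entry is now an object (the regex value here is illustrative):

    "namespaces": {
        "users": [
            { "regex": "@irc_.*", "exclusive": true }
        ]
    }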
Kegan Dougal
806a6c886a PEP8 2015-02-27 09:48:57 +00:00
Kegan Dougal
0ebd632d39 Fix unit tests 2015-02-27 09:46:38 +00:00
Kegan Dougal
1cc77145d4 Notify appservices of invites mid-poll.
This requires the notifier to have knowledge of appservice listeners so it can
do the regex checks on incoming invites to see if the state_key matches. It
isn't enough to just rely on the room listeners and store.get_app_service_rooms
as the room will initially not exist or won't be on the ASes radar due to
having none of its users in the room.
2015-02-27 09:39:12 +00:00
David Baker
cfac3b7873 SYN-267 Add a fallback rule as an explicit server default rule and make the default dont-notify so you effectively have a "notify for everything else" switch you can turn on and off. 2015-02-26 18:58:14 +00:00
David Baker
1959088156 Add API for getting/setting enabled-ness of push rules. 2015-02-26 18:07:44 +00:00
Kegan Dougal
f0995436e7 Check for membership invite events correctly. 2015-02-26 17:21:17 +00:00
Kegan Dougal
210ef79100 Update CHANGES 2015-02-26 16:25:37 +00:00
Kegan Dougal
dcec7175dc Finish impl to get new events for AS. ASes should now be able to poll /events 2015-02-26 16:23:01 +00:00
Erik Johnston
93d90765c4 Initial implementation of federation server rate limiting 2015-02-26 16:15:26 +00:00
Erik Johnston
59362454dd Must update pending_transactions map before yield'ing 2015-02-26 15:47:35 +00:00
Kegan Dougal
92478e96d6 Finish impl to extract all room IDs an AS may be interested in when polling the event stream. 2015-02-26 14:35:28 +00:00
David Baker
944003021b whitespace 2015-02-26 13:43:05 +00:00
David Baker
94fa334b01 Add enable/disable overlay for push rules (REST API not yet hooked up) 2015-02-25 19:17:07 +00:00
Kegan Dougal
29267cf9d7 PEP8 and pyflakes 2015-02-25 17:42:28 +00:00
Kegan Dougal
978ce87c86 Comment unused variables. 2015-02-25 17:37:48 +00:00
Kegan Dougal
2c79c4dc7f Fix alias query. 2015-02-25 17:37:14 +00:00
Kegan Dougal
2b8ca84296 Add support for extracting matching room_ids and room_aliases for a given AS. 2015-02-25 17:15:25 +00:00
Kegan Dougal
2d20466f9a Add stub functions and work out execution flow to implement AS event stream polling. 2015-02-25 15:00:59 +00:00
David Baker
a025055643 SYWEB-278 Don't allow rules with no rule_id. 2015-02-25 14:02:38 +00:00
David Baker
255f989c7b turns uris config options should append since it's a list 2015-02-24 20:57:58 +00:00
David Baker
e60353c4a0 Fix YAML syntax of turn config example 2015-02-24 19:34:21 +00:00
David Baker
4212e7a049 tabs/spaces 2015-02-24 16:02:48 +00:00
David Baker
64c23352f9 Use standard form submission so the go button on the keyboard works. 2015-02-24 16:01:38 +00:00
Kegsay
4390a36b6e Merge pull request #91 from matrix-org/registration-fallback-ios-display
Fallback registration page: make the page autoresize on iPhone Safari/Webview
2015-02-24 15:24:53 +00:00
manuroe
4a39c10eef Fallback registration page: oops. Removed dev test. 2015-02-24 16:20:48 +01:00
manuroe
1b4e3b7fa6 Fallback registration page: added the classic viewport meta to fix the display on iPhone Safari and webview. The width of input elements also needs to be fixed. 2015-02-24 16:16:40 +01:00
David Baker
443ba4eecc %s for strings otherwise you end up sending 'u"foo"' 2015-02-24 15:00:12 +00:00
Erik Johnston
c0aaf9fe76 Merge pull request #89 from matrix-org/registration-fallback
Registration fallback
2015-02-24 10:00:33 +00:00
Kegan Dougal
082c88a4b2 Update CHANGES and UPGRADE 2015-02-24 09:58:20 +00:00
Kegan Dougal
2eeb8ec4fa Set user-visible error when the server is misconfigured. 2015-02-24 09:53:30 +00:00
Paul "LeoNerd" Evans
9640510de2 Use OrderedDict for @cached backing store, so we can evict the oldest key unbiased 2015-02-23 18:41:58 +00:00
Paul "LeoNerd" Evans
f53fcbce97 Use cache.pop() instead of a separate membership test + del [] 2015-02-23 18:30:45 +00:00
Mark Haines
27080698e7 Fix code style warning 2015-02-23 18:19:13 +00:00
Mark Haines
74048bdd41 Remove unused import 2015-02-23 18:17:43 +00:00
Kegan Dougal
28d8614f48 Trailing comma 2015-02-23 17:36:37 +00:00
Paul "LeoNerd" Evans
bd84755e64 Merge remote-tracking branch 'origin/develop' into performance-cache-improvements 2015-02-23 17:16:03 +00:00
Kegan Dougal
f30d4d5308 Add jquery 2015-02-23 17:12:15 +00:00
Kegan Dougal
e36b18ad5b Get everything working 2015-02-23 17:09:11 +00:00
Paul "LeoNerd" Evans
a09e59a698 Pull the _get_event_cache.setdefault() call out of the try block, as it doesn't need to be there and is confusing 2015-02-23 16:55:57 +00:00
Kegan Dougal
e6363857d0 Add core registration html/js 2015-02-23 16:08:43 +00:00
Paul "LeoNerd" Evans
044d813ef7 Use the @cached decorator to implement the destination_retry_timings cache 2015-02-23 16:04:40 +00:00
Paul "LeoNerd" Evans
357fba2c24 RoomMemberStore no longer needs a _user_rooms_cache member 2015-02-23 15:57:41 +00:00
Paul "LeoNerd" Evans
e76d485e29 Allow @cached-wrapped functions to have a prefill method for setting entries 2015-02-23 15:41:54 +00:00
Kegan Dougal
0696dfd94b Actually treat this as static content, not random Resources. 2015-02-23 15:35:09 +00:00
Kegan Dougal
22399d3d8f Add RegisterFallbackResource to /_matrix/static/client/register
Try to keep both forms of registration logic (native/fallback) close
together for sanity.
2015-02-23 15:14:56 +00:00
Erik Johnston
852816befe Fix presence tests 2015-02-23 15:14:09 +00:00
Paul "LeoNerd" Evans
4631b737fd Squash out the now-redundant ApplicationServicesCache object class 2015-02-23 14:38:44 +00:00
Erik Johnston
e25e0f4da9 Merge branch 'develop' of github.com:matrix-org/synapse into batched_get_pdu 2015-02-23 14:36:00 +00:00
Erik Johnston
42b972bccd Revert get_auth_chain changes 2015-02-23 14:35:23 +00:00
Erik Johnston
db215b7e00 Implement and use new batched get missing pdu 2015-02-23 13:58:02 +00:00
Mark Haines
3741c336ff Merge pull request #87 from brabo/master
added "cd ~/.synapse" before setup of the homeserver to generate our fil...
2015-02-23 12:54:36 +00:00
brabo
596daf6e68 added "cd ~/.synapse" before setup of the homeserver to generate our files in there instead of ~ 2015-02-22 18:52:59 +01:00
Erik Johnston
b33a4cd6cc Merge pull request #86 from matrix-org/v0.7.1-r2
V0.7.1 r2
2015-02-21 13:51:00 +00:00
Erik Johnston
a87c56c673 Bump version 2015-02-21 13:45:07 +00:00
Erik Johnston
1f29fafc95 Don't exit if we can't work out if we're running in a git repo 2015-02-21 13:44:46 +00:00
Erik Johnston
7c56210f20 By default set soft limit to hard limit 2015-02-20 16:09:44 +00:00
Erik Johnston
7367ca42b5 Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-02-20 16:06:28 +00:00
Erik Johnston
2b45ca1541 Merge branch 'hotfixes-v0.7.1-r1' of github.com:matrix-org/synapse 2015-02-20 15:40:47 +00:00
Erik Johnston
dc0ee55110 Change version scheme 2015-02-20 15:00:14 +00:00
Erik Johnston
0edfecc904 Bump version 2015-02-20 14:14:28 +00:00
Erik Johnston
2bafeca270 Add missing comma so that it generates a dict and not a set 2015-02-20 14:08:42 +00:00
Erik Johnston
e944b767d7 Merge pull request #84 from matrix-org/disable_registration
Disable Registration Config Option
2015-02-20 11:43:24 +00:00
Erik Johnston
15e2d7e387 Always allow AS to register 2015-02-20 11:39:53 +00:00
Paul "LeoNerd" Evans
55022d6ca5 Remove a TODO note 2015-02-19 18:38:09 +00:00
Paul "LeoNerd" Evans
ebc3db295b Take named arguments to @cached() decorator, add a 'max_entries' limit 2015-02-19 18:36:02 +00:00
Paul "LeoNerd" Evans
077d200342 Move @cached decorator out into synapse.storage._base; add minimal docs 2015-02-19 17:29:39 +00:00
Erik Johnston
0ac2a79faa Initial stab at implementing a batched get_missing_pdus request 2015-02-19 17:24:14 +00:00
Paul "LeoNerd" Evans
61959928bb Pull out the 'get_rooms_for_user' cache logic into a reüsable @cached decorator 2015-02-19 14:58:07 +00:00
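The essence of the decorator, sketched rather than copied from Synapse (and ignoring the deferred plumbing): memoise a one-argument method behind a bounded cache. The commits above extend exactly this shape with named arguments such as max_entries and a prefill hook:

    from collections import OrderedDict
    from functools import wraps

    def cached(max_entries=1000):
        def wrap(orig):
            cache = OrderedDict()

            @wraps(orig)
            def wrapped(self, key):  # assumes a single-key method lookup
                if key in cache:
                    return cache[key]
                value = orig(self, key)
                cache[key] = value
                while len(cache) > max_entries:
                    cache.popitem(last=False)  # evict the oldest entry
                return value

            def prefill(key, value):
                # Seed an entry without invoking the wrapped function.
                cache[key] = value

            wrapped.prefill = prefill
            return wrapped
        return wrap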
Erik Johnston
5f4c28d313 Update tests 2015-02-19 14:34:32 +00:00
Erik Johnston
0722f982d3 Disable registration if config option was set. 2015-02-19 14:22:20 +00:00
Erik Johnston
81163f822e Add config option to disable registration. 2015-02-19 14:16:53 +00:00
Erik Johnston
939273c4b0 Rename resource variable so as to not shadow module import 2015-02-19 11:53:13 +00:00
Erik Johnston
c3eb7dd9c5 Add config option to set the soft fd limit on start 2015-02-19 11:50:49 +00:00
Erik Johnston
7f058c5ff7 Merge branch 'develop' of github.com:matrix-org/synapse into erikj-perf
Conflicts:
	synapse/app/homeserver.py
2015-01-22 13:35:34 +00:00
Erik Johnston
82be4457de Add twisted Service interface 2015-01-07 13:46:37 +00:00
275 changed files with 20731 additions and 5933 deletions

.gitignore (1 line changed)

@@ -41,3 +41,4 @@
 media_store/
 build/
 localhost-800*/
+static/client/register/register_config.js

AUTHORS.rst (new file, 47 lines)

@@ -0,0 +1,47 @@
Erik Johnston <erik at matrix.org>
* HS core
* Federation API impl
Mark Haines <mark at matrix.org>
* HS core
* Crypto
* Content repository
* CS v2 API impl
Kegan Dougal <kegan at matrix.org>
* HS core
* CS v1 API impl
* AS API impl
Paul "LeoNerd" Evans <paul at matrix.org>
* HS core
* Presence
* Typing Notifications
* Performance metrics and caching layer
Dave Baker <dave at matrix.org>
* Push notifications
* Auth CS v2 impl
Matthew Hodgson <matthew at matrix.org>
* General doc & housekeeping
* Vertobot/vertobridge matrix<->verto PoC
Emmanuel Rohee <manu at matrix.org>
* Supporting iOS clients (testability and fallback registration)
Turned to Dust <dwinslow86 at gmail.com>
* ArchLinux installation instructions
Brabo <brabo at riseup.net>
* Installation instruction fixes
Ivan Shapovalov <intelfx100 at gmail.com>
* contrib/systemd: a sample systemd unit file and a logger configuration
Eric Myhre <hash at exultant.us>
* Fix bug where ``media_store_path`` config option was ignored by v0 content
repository API.
Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
* Add SAML2 support for registration and logins.

CHANGES.rst

@@ -1,3 +1,185 @@
Changes in synapse v0.9.3 (2015-07-01)
======================================
No changes from v0.9.3 Release Candidate 1.
Changes in synapse v0.9.3-rc1 (2015-06-23)
==========================================
General:
* Fix a memory leak in the notifier. (SYN-412)
* Improve performance of room initial sync. (SYN-418)
* General improvements to logging.
* Remove ``access_token`` query params from ``INFO`` level logging.
Configuration:
* Add support for specifying and configuring multiple listeners. (SYN-389)
Application services:
* Fix bug where synapse failed to send user queries to application services.
Changes in synapse v0.9.2-r2 (2015-06-15)
=========================================
Fix packaging so that schema delta python files get included in the package.
Changes in synapse v0.9.2 (2015-06-12)
======================================
General:
* Use ultrajson for json (de)serialisation when a canonical encoding is not
required. Ultrajson is significantly faster than simplejson in certain
circumstances.
* Use connection pools for outgoing HTTP connections.
* Process thumbnails on separate threads.
Configuration:
* Add option, ``gzip_responses``, to disable HTTP response compression.
Federation:
* Improve resilience of backfill by ensuring we fetch any missing auth events.
* Improve performance of backfill and joining remote rooms by removing
unnecessary computations. This included handling events we'd previously
handled as well as attempting to compute the current state for outliers.
Changes in synapse v0.9.1 (2015-05-26)
======================================
General:
* Add support for backfilling when a client paginates. This allows servers to
request history for a room from remote servers when a client tries to
paginate history the server does not have - SYN-36
* Fix bug where you couldn't disable non-default pushrules - SYN-378
* Fix ``register_new_user`` script - SYN-359
* Improve performance of fetching events from the database, this improves both
initialSync and sending of events.
* Improve performance of event streams, allowing synapse to handle more
simultaneous connected clients.
Federation:
* Fix bug with existing backfill implementation where it returned the wrong
selection of events in some circumstances.
* Improve performance of joining remote rooms.
Configuration:
* Add support for changing the bind host of the metrics listener via the
``metrics_bind_host`` option.
Changes in synapse v0.9.0-r5 (2015-05-21)
=========================================
* Add more database caches to reduce amount of work done for each pusher. This
radically reduces CPU usage when multiple pushers are set up in the same room.
Changes in synapse v0.9.0 (2015-05-07)
======================================
General:
* Add support for using a PostgreSQL database instead of SQLite. See
`docs/postgres.rst`_ for details.
* Add password change and reset APIs. See `Registration`_ in the spec.
* Fix memory leak due to not releasing stale notifiers - SYN-339.
* Fix race in caches that occasionally caused some presence updates to be
dropped - SYN-369.
* Check server name has not changed on restart.
* Add a sample systemd unit file and a logger configuration in
contrib/systemd. Contributed by Ivan Shapovalov.
Federation:
* Add key distribution mechanisms for fetching public keys of unavailable
remote home servers. See `Retrieving Server Keys`_ in the spec.
Configuration:
* Add support for multiple config files.
* Add support for dictionaries in config files.
* Remove support for specifying config options on the command line, except
for:
* ``--daemonize`` - Daemonize the home server.
* ``--manhole`` - Turn on the twisted telnet manhole service on the given
port.
* ``--database-path`` - The path to a sqlite database to use.
* ``--verbose`` - The verbosity level.
* ``--log-file`` - File to log to.
* ``--log-config`` - Python logging config file.
* ``--enable-registration`` - Enable registration for new users.
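Everything else moves into the YAML config, so a typical startup reduces to something like the following (paths illustrative, and the config flag is assumed to be ``--config-path``)::

    python -m synapse.app.homeserver --config-path homeserver.yaml --daemonize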
Application services:
* Reliably retry sending of events from Synapse to application services, as per
`Application Services`_ spec.
* Application services can no longer register via the ``/register`` API,
instead their configuration should be saved to a file and listed in the
synapse ``app_service_config_files`` config option. The AS configuration file
has the same format as the old ``/register`` request.
See `docs/application_services.rst`_ for more information.
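In ``homeserver.yaml`` that looks roughly like this (paths illustrative)::

    app_service_config_files:
      - /etc/synapse/appservices/irc-bridge.yaml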
.. _`docs/postgres.rst`: docs/postgres.rst
.. _`docs/application_services.rst`: docs/application_services.rst
.. _`Registration`: https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration
.. _`Retrieving Server Keys`: https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys
.. _`Application Services`: https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api
Changes in synapse v0.8.1 (2015-03-18)
======================================
* Disable registration by default. New users can be added using the command
``register_new_matrix_user`` or by enabling registration in the config.
* Add metrics to synapse. To enable metrics use config options
``enable_metrics`` and ``metrics_port`` (sketched below).
* Fix bug where banning only kicked the user.
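A sketch of those two metrics options in ``homeserver.yaml`` (the port number is illustrative)::

    enable_metrics: true
    metrics_port: 9092   # serves /_synapse/metrics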
Changes in synapse v0.8.0 (2015-03-06)
======================================
General:
* Add support for registration fallback. This is a page hosted on the server
which allows a user to register for an account, regardless of what client
they are using (e.g. mobile devices).
* Added new default push rules and made them configurable by clients:
* Suppress all notice messages.
* Notify when invited to a new room.
* Notify for messages that don't match any rule.
* Notify on incoming call.
Federation:
* Added per host server side rate-limiting of incoming federation requests.
* Added a ``/get_missing_events/`` API to federation to reduce number of
``/events/`` requests.
Configuration:
* Added configuration option to disable registration:
``disable_registration``.
* Added configuration option to change soft limit of number of open file
descriptors: ``soft_file_limit``.
* Make ``tls_private_key_path`` optional when running with ``no_tls``.
Application services:
* Application services can now poll on the CS API ``/events`` for their events,
by providing their application service ``access_token``.
* Added exclusive namespace support to application services API.
Changes in synapse v0.7.1 (2015-02-19)
======================================

CONTRIBUTING.rst (new file, 118 lines)

@@ -0,0 +1,118 @@
Contributing code to Matrix
===========================
Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see LICENSE).
How to contribute
~~~~~~~~~~~~~~~~~
The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/)
**The single biggest thing you need to know is: please base your changes on
the develop branch - /not/ master.**
We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
works. Develop is the unstable branch where all the development actually
happens: the workflow is that contributors should fork the develop branch to
make a 'feature' branch for a particular contribution, and then make a pull
request to merge this back into the matrix.org 'official' develop branch. We
use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use Jenkins for continuous integration (http://matrix.org/jenkins), and
typically all pull requests get automatically tested by Jenkins: if your change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org, so please lurk there and keep an eye open.
Code style
~~~~~~~~~~
All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.
Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.
Attribution
~~~~~~~~~~~
Everyone who contributes anything to Matrix is welcome to be listed in the
AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :)
Sign off
~~~~~~~~
In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix::
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::
Signed-off-by: Your Name <your@email.example.org>
...using your real name; unfortunately pseudonyms and anonymous contributions
can't be accepted. Git makes this trivial - just use the -s flag when you do
``git commit``, having first set ``user.name`` and ``user.email`` git configs
(which you should have done anyway :)
Conclusion
~~~~~~~~~~
That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!


@@ -5,6 +5,7 @@ include *.rst
include demo/README
recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py
recursive-include demo *.dh
recursive-include demo *.py


@@ -1,3 +1,5 @@
.. contents::
Introduction
============
@@ -18,7 +20,7 @@ The overall architecture is::
https://somewhere.org/_matrix https://elsewhere.net/_matrix
``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by the web client at http://matrix.org/alpha or via an IRC bridge at
accessed by the web client at http://matrix.org/beta or via an IRC bridge at
irc://irc.freenode.net/matrix.
Synapse is currently in rapid development, but as of version 0.5 we believe it
@@ -67,24 +69,30 @@ Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
web client demo implemented in AngularJS) and cmdclient (a basic Python
command line utility which lets you easily see what the JSON APIs are up to).
Meanwhile, iOS and Android SDKs and clients are currently in development and available from:
Meanwhile, iOS and Android SDKs and clients are available from:
- https://github.com/matrix-org/matrix-ios-sdk
- https://github.com/matrix-org/matrix-ios-kit
- https://github.com/matrix-org/matrix-ios-console
- https://github.com/matrix-org/matrix-android-sdk
We'd like to invite you to join #matrix:matrix.org (via http://matrix.org/alpha), run a homeserver, take a look at the Matrix spec at
http://matrix.org/docs/spec, experiment with the APIs and the demo
clients, and report any bugs via http://matrix.org/jira.
We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/beta), run a homeserver, take a look at the Matrix spec at
https://matrix.org/docs/spec and API docs at https://matrix.org/docs/api,
experiment with the APIs and the demo clients, and report any bugs via
https://matrix.org/jira.
Thanks for using Matrix!
[1] End-to-end encryption is currently in development
Homeserver Installation
=======================
Synapse Installation
====================
Synapse is the reference python/twisted Matrix homeserver implementation.
System requirements:
- POSIX-compliant system (tested on Linux & OSX)
- POSIX-compliant system (tested on Linux & OS X)
- Python 2.7
Synapse is written in python but some of the libraries it uses are written in
@@ -93,80 +101,124 @@ header files for python C extensions.
Installing prerequisites on Ubuntu or Debian::
$ sudo apt-get install build-essential python2.7-dev libffi-dev \
sudo apt-get install build-essential python2.7-dev libffi-dev \
python-pip python-setuptools sqlite3 \
libssl-dev python-virtualenv libjpeg-dev
Installing prerequisites on ArchLinux::
$ sudo pacman -S base-devel python2 python-pip \
sudo pacman -S base-devel python2 python-pip \
python-setuptools python-virtualenv sqlite3
Installing prerequisites on Mac OS X::
$ xcode-select --install
$ sudo pip install virtualenv
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
To install the synapse homeserver run::
$ virtualenv ~/.synapse
$ source ~/.synapse/bin/activate
$ pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
virtualenv -p python2.7 ~/.synapse
source ~/.synapse/bin/activate
pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
This installs synapse, along with the libraries it uses, into a virtual
environment under ``~/.synapse``.
environment under ``~/.synapse``. Feel free to pick a different directory
if you prefer.
In case of problems, please see the `Troubleshooting`_ section below.
Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
To set up your homeserver, run (in your virtualenv, as before)::
$ python -m synapse.app.homeserver \
cd ~/.synapse
python -m synapse.app.homeserver \
--server-name machine.my.domain.name \
--config-path homeserver.yaml \
--generate-config
Substituting your host and domain name as appropriate.
This will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your Home Server to
identify itself to other Home Servers, so don't lose or delete them. It would be
wise to back them up somewhere safe. If, for whatever reason, you do need to
change your Home Server's keys, you may find that other Home Servers have the
old key cached. If you update the signing key, you should change the name of the
key in the <server name>.signing.key file (the second word, which by default is
'auto') to something different.
By default, registration of new users is disabled. You can either enable
registration in the config by specifying ``enable_registration: true``
(it is then recommended to also set up CAPTCHA), or
you can use the command line to register new users::
$ source ~/.synapse/bin/activate
$ register_new_matrix_user -c homeserver.yaml https://localhost:8448
New user localpart: erikj
Password:
Confirm password:
Success!
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See docs/turn-howto.rst for details.
Troubleshooting Installation
----------------------------
Using PostgreSQL
================
Synapse requires pip 1.7 or later, so if your OS provides too old a version and
you get errors about ``error: no such option: --process-dependency-links``, you
may need to manually upgrade it::
As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
traditionally used for convenience and simplicity.
$ sudo pip install --upgrade pip
The advantages of Postgres include:
If pip crashes mid-installation for whatever reason (e.g. lost terminal), pip may
refuse to run until you remove the temporary installation directory it
created. To reset the installation::
* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
* allowing basic active/backup high-availability with a "hot spare" synapse
pointing at the same DB master, as well as enabling DB replication in
synapse itself.
$ rm -rf /tmp/pip_install_matrix
The only disadvantage is that the code is relatively new as of April 2015 and
may have a few regressions relative to SQLite.
pip seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.::
For information on how to install and use PostgreSQL, please see
`docs/postgres.rst <docs/postgres.rst>`_.
$ pip install twisted
Running Synapse
===============
On OSX, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
will need to export CFLAGS=-Qunused-arguments.
To actually run your new homeserver, pick a working directory for Synapse to run
(e.g. ``~/.synapse``), and::
cd ~/.synapse
source ./bin/activate
synctl start
Platform Specific Instructions
==============================
ArchLinux
---------
Installation on ArchLinux may encounter a few hiccups as Arch defaults to
python 3, but synapse currently assumes python 2.7 by default.
The quickest way to get up and running with ArchLinux is probably with Ivan
Shapovalov's AUR package from
https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
the necessary dependencies.
Alternatively, to install using pip a few changes may be needed as ArchLinux
defaults to python 3, but synapse currently assumes python 2.7 by default:
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1)::
$ sudo pip2.7 install --upgrade pip
sudo pip2.7 install --upgrade pip
You also may need to explicitly specify python 2.7 again during the install
request::
$ pip2.7 install --process-dependency-links \
pip2.7 install --process-dependency-links \
https://github.com/matrix-org/synapse/tarball/master
If you encounter an error with lib bcrypt causing a Wrong ELF Class:
@@ -174,12 +226,13 @@ ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv)::
$ sudo pip2.7 uninstall py-bcrypt
$ sudo pip2.7 install py-bcrypt
sudo pip2.7 uninstall py-bcrypt
sudo pip2.7 install py-bcrypt
During setup of homeserver you need to call python2.7 directly again::
During setup of Synapse you need to call python2.7 directly again::
$ python2.7 -m synapse.app.homeserver \
cd ~/.synapse
python2.7 -m synapse.app.homeserver \
--server-name machine.my.domain.name \
--config-path homeserver.yaml \
--generate-config
@@ -217,15 +270,33 @@ Troubleshooting:
you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
Running Your Homeserver
=======================
Troubleshooting
===============
To actually run your new homeserver, pick a working directory for Synapse to run
(e.g. ``~/.synapse``), and::
Troubleshooting Installation
----------------------------
$ cd ~/.synapse
$ source ./bin/activate
$ synctl start
Synapse requires pip 1.7 or later, so if your OS provides too old a version and
you get errors about ``error: no such option: --process-dependency-links``, you
may need to manually upgrade it::
sudo pip install --upgrade pip
If pip crashes mid-installation for whatever reason (e.g. lost terminal), pip may
refuse to run until you remove the temporary installation directory it
created. To reset the installation::
rm -rf /tmp/pip_install_matrix
pip seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.::
pip install twisted
On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
will need to export CFLAGS=-Qunused-arguments.
Troubleshooting Running
-----------------------
@@ -240,44 +311,46 @@ correctly, causing all tests to fail with errors about missing "sodium.h". To
fix try re-installing from PyPI or directly from
(https://github.com/pyca/pynacl)::
$ # Install from PyPI
$ pip install --user --upgrade --force pynacl
$ # Install from github
$ pip install --user https://github.com/pyca/pynacl/tarball/master
# Install from PyPI
pip install --user --upgrade --force pynacl
# Install from github
pip install --user https://github.com/pyca/pynacl/tarball/master
ArchLinux
---------
~~~~~~~~~
If running `$ synctl start` fails wit 'returned non-zero exit status 1', you will need to explicitly call Python2.7 - either running as::
If running `$ synctl start` fails with 'returned non-zero exit status 1',
you will need to explicitly call Python2.7 - either running as::
$ python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml --pid-file homeserver.pid
python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml
...or by editing synctl with the correct python executable.
Homeserver Development
======================
Synapse Development
===================
To check out a homeserver for development, clone the git repo into a working
To check out a synapse for development, clone the git repo into a working
directory of your choice::
$ git clone https://github.com/matrix-org/synapse.git
$ cd synapse
git clone https://github.com/matrix-org/synapse.git
cd synapse
The homeserver has a number of external dependencies, that are easiest
Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::
$ virtualenv env
$ source env/bin/activate
$ python synapse/python_dependencies.py | xargs -n1 pip install
$ pip install setuptools_trial mock
virtualenv env
source env/bin/activate
python synapse/python_dependencies.py | xargs -n1 pip install
pip install setuptools_trial mock
This will run a process of downloading and installing all the needed
dependencies into a virtual env.
Once this is done, you may wish to run the homeserver's unit tests, to
Once this is done, you may wish to run Synapse's unit tests, to
check that everything is installed as it should be::
$ python setup.py test
python setup.py test
This should end with a 'PASSED' result::
@@ -286,10 +359,10 @@ This should end with a 'PASSED' result::
PASSED (successes=143)
Upgrading an existing homeserver
================================
Upgrading an existing Synapse
=============================
IMPORTANT: Before upgrading an existing homeserver to a new version, please
IMPORTANT: Before upgrading an existing synapse to a new version, please
refer to UPGRADE.rst for any additional instructions.
Otherwise, simply re-install the new codebase over the current one - e.g.
@@ -318,11 +391,11 @@ IDs:
For the first form, simply pass the required hostname (of the machine) as the
--server-name parameter::
$ python -m synapse.app.homeserver \
python -m synapse.app.homeserver \
--server-name machine.my.domain.name \
--config-path homeserver.yaml \
--generate-config
$ python -m synapse.app.homeserver --config-path homeserver.yaml
python -m synapse.app.homeserver --config-path homeserver.yaml
Alternatively, you can run ``synctl start`` to guide you through the process.
@@ -332,37 +405,32 @@ and port where the server is running. (At the current time synapse does not
support clustering multiple servers into a single logical homeserver). The DNS
record would then look something like::
$ dig -t srv _matrix._tcp.machine.my.domaine.name
$ dig -t srv _matrix._tcp.machine.my.domain.name
_matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
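If you'd rather check the record programmatically than eyeball ``dig``
output, a throwaway sketch using the third-party ``dnspython`` module (an
assumption for this example; it is not a synapse dependency) might look
like::

    import dns.resolver  # pip install dnspython

    for rr in dns.resolver.query("_matrix._tcp.machine.my.domain.name", "SRV"):
        print rr.priority, rr.weight, rr.port, rr.target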
At this point, you should then run the homeserver with the hostname of this
SRV record, as that is the name other machines will expect it to have::
$ python -m synapse.app.homeserver \
python -m synapse.app.homeserver \
--server-name YOURDOMAIN \
--bind-port 8448 \
--config-path homeserver.yaml \
--generate-config
$ python -m synapse.app.homeserver --config-path homeserver.yaml
python -m synapse.app.homeserver --config-path homeserver.yaml
You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.
For the initial alpha release, the homeserver is not speaking TLS for
either client-server or server-server traffic for ease of debugging. We have
also not spent any time yet getting the homeserver to run behind loadbalancers.
Running a Demo Federation of Homeservers
----------------------------------------
Running a Demo Federation of Synapses
-------------------------------------
If you want to get up and running quickly with a trio of homeservers in a
private federation (``localhost:8080``, ``localhost:8081`` and
``localhost:8082``), which you can then access through the webclient running at
http://localhost:8080, simply run::
$ demo/start.sh
demo/start.sh
This is mainly useful just for development purposes.
@@ -390,7 +458,10 @@ account. Your name will take the form of::
Specify your desired localpart in the topmost box of the "Register for an
account" form, and click the "Register" button. Hostnames can contain ports if
required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
internal synapse sandbox running on localhost)
internal synapse sandbox running on localhost).
If registration fails, you may need to enable it in the homeserver (see
`Synapse Installation`_ above).
Logging In To An Existing Account
@@ -416,7 +487,7 @@ track 3PID logins and publish end-user public keys.
It's currently early days for identity servers as Matrix is not yet using 3PIDs
as the primary means of identity and E2E encryption is not complete. As such,
we are running a single identity server (http://matrix.org:8090) at the current
we are running a single identity server (https://matrix.org) at the current
time.
@@ -433,10 +504,10 @@ Building Internal API Documentation
Before building internal API documentation install sphinx and
sphinxcontrib-napoleon::
$ pip install sphinx
$ pip install sphinxcontrib-napoleon
pip install sphinx
pip install sphinxcontrib-napoleon
Building internal API documentation::
$ python setup.py build_sphinx
python setup.py build_sphinx


@@ -1,3 +1,52 @@
Upgrading to v0.9.0
===================
Application services have had a breaking API change in this version.
They can no longer register themselves with a home server using the AS HTTP API. This
decision was made because a compromised application service with free rein to register
any regex in effect grants full read/write access to the home server if a regex of ``.*``
is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
big of a security risk to ignore, and so the ability to register with the HS remotely has
been removed.
It has been replaced by specifying a list of application service registrations in
``homeserver.yaml``::
app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
Where ``registration-01.yaml`` looks like::
url: <String>  # e.g. "https://my.application.service.com"
as_token: <String>
hs_token: <String>
sender_localpart: <String>  # This is a new field which denotes the user_id localpart when using the AS token
namespaces:
  users:
    - exclusive: <Boolean>
      regex: <String>  # e.g. "@prefix_.*"
  aliases:
    - exclusive: <Boolean>
      regex: <String>
  rooms:
    - exclusive: <Boolean>
      regex: <String>
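To illustrate what these namespaces mean in practice, here is a minimal
sketch (hypothetical helper code, not part of synapse) that loads a
registration file in the format above and checks whether the AS claims a
given user ID::

    import re

    import yaml

    reg = yaml.safe_load(open("registration-01.yaml"))

    def as_claims_user(user_id):
        # True if any regex in the 'users' namespace matches this user ID
        return any(
            re.match(ns["regex"], user_id)
            for ns in reg["namespaces"]["users"]
        )

    print as_claims_user("@prefix_bot:example.org")  # True for "@prefix_.*"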
Upgrading to v0.8.0
===================
Servers which use captchas will need to add their public key to::
static/client/register/register_config.js
window.matrixRegistrationConfig = {
    recaptcha_public_key: "YOUR_PUBLIC_KEY"
};
This is required in order to support registration fallback (typically used on
mobile devices).
Upgrading to v0.7.0
===================

contrib/scripts/kick_users.py Executable file

@@ -0,0 +1,93 @@
#!/usr/bin/env python

from argparse import ArgumentParser
import json
import requests
import sys
import urllib


def _mkurl(template, kws):
    for key in kws:
        template = template.replace(key, kws[key])
    return template


def main(hs, room_id, access_token, user_id_prefix, why):
    if not why:
        why = "Automated kick."
    print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
    room_state_url = _mkurl(
        "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
        {
            "$HS": hs,
            "$ROOM": room_id,
            "$TOKEN": access_token
        }
    )
    print "Getting room state => %s" % room_state_url
    res = requests.get(room_state_url)
    print "HTTP %s" % res.status_code
    state_events = res.json()
    if "error" in state_events:
        print "FATAL"
        print state_events
        return

    kick_list = []
    room_name = room_id
    for event in state_events:
        if not event["type"] == "m.room.member":
            if event["type"] == "m.room.name":
                room_name = event["content"].get("name")
            continue
        if not event["content"].get("membership") == "join":
            continue
        if event["state_key"].startswith(user_id_prefix):
            kick_list.append(event["state_key"])

    if len(kick_list) == 0:
        print "No user IDs match the prefix '%s'" % user_id_prefix
        return

    print "The following user IDs will be kicked from %s" % room_name
    for uid in kick_list:
        print uid
    doit = raw_input("Continue? [Y]es\n")
    if len(doit) > 0 and doit.lower() == 'y':
        print "Kicking members..."
        # encode them all
        kick_list = [urllib.quote(uid) for uid in kick_list]
        for uid in kick_list:
            kick_url = _mkurl(
                "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
                {
                    "$HS": hs,
                    "$UID": uid,
                    "$ROOM": room_id,
                    "$TOKEN": access_token
                }
            )
            kick_body = {
                "membership": "leave",
                "reason": why
            }
            print "Kicking %s" % uid
            res = requests.put(kick_url, data=json.dumps(kick_body))
            if res.status_code != 200:
                print "ERROR: HTTP %s" % res.status_code
            if res.json().get("error"):
                print "ERROR: JSON %s" % res.json()


if __name__ == "__main__":
    parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
    parser.add_argument("-u", "--user-id", help="The user ID prefix e.g. '@irc_'")
    parser.add_argument("-t", "--token", help="Your access_token")
    parser.add_argument("-r", "--room", help="The room ID to kick members in")
    parser.add_argument("-s", "--homeserver", help="The base HS url e.g. http://matrix.org")
    parser.add_argument("-w", "--why", help="Reason for the kick. Optional.")
    args = parser.parse_args()
    if not args.room or not args.token or not args.user_id or not args.homeserver:
        parser.print_help()
        sys.exit(1)
    else:
        main(args.homeserver, args.room, args.token, args.user_id, args.why)


@@ -0,0 +1,25 @@
version: 1

# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
    journal_fmt:
        format: '%(name)s: [%(request)s] %(message)s'

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

handlers:
    journal:
        class: systemd.journal.JournalHandler
        formatter: journal_fmt
        filters: [context]
        SYSLOG_IDENTIFIER: synapse

root:
    level: INFO
    handlers: [journal]

disable_existing_loggers: False
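
# Note: this is a standard python logging dictConfig dictionary; synapse
# applies it when started with --log-config=<path to this file> (see the
# systemd unit below, which passes --log-config=/etc/synapse/log_config.yaml).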


@@ -0,0 +1,16 @@
# This assumes that Synapse has been installed as a system package
# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.
[Unit]
Description=Synapse Matrix homeserver
[Service]
Type=simple
User=synapse
Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
[Install]
WantedBy=multi-user.target


@@ -175,13 +175,12 @@ sub on_room_message
my $verto_connecting = $loop->new_future;
$bot_verto->connect(
%{ $CONFIG{"verto-bot"} },
on_connected => sub {
warn("[Verto] connected to websocket");
$verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
},
on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);
)->then( sub {
warn("[Verto] connected to websocket");
$verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
});
Future->needs_all(
$bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {

contrib/vertobot/bridge.pl Executable file

@@ -0,0 +1,493 @@
#!/usr/bin/env perl
use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::HTTP;
use Net::Async::HTTP::Server;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;
use URI::Encode qw(uri_encode uri_decode);
binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";
my $msisdn_to_matrix = {
'447417892400' => '@matthew:matrix.org',
};
my $matrix_to_msisdn = {};
foreach (keys %$msisdn_to_matrix) {
$matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
}
my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
# https://rt.cpan.org/Ticket/Display.html?id=93107
# ref $loop eq "IO::Async::Loop::Poll" and
# warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
GetOptions(
'C|config=s' => \my $CONFIG,
'eval-from=s' => \my $EVAL_FROM,
) or exit 1;
if( defined $EVAL_FROM ) {
# An emergency 'eval() this file' hack
$SIG{HUP} = sub {
my $code = do {
open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
local $/; <$fh>
};
eval $code or warn "Cannot eval() - $@";
};
}
defined $CONFIG or die "Must supply --config\n";
my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
my $bridgestate = {};
my $roomid_by_callid = {};
my $sessid = lc new Data::UUID->create_str();
my $as_token = $CONFIG{"matrix-bot"}->{as_token};
my $hs_domain = $CONFIG{"matrix-bot"}->{domain};
my $http = Net::Async::HTTP->new();
$loop->add( $http );
sub create_virtual_user
{
my ($localpart) = @_;
my ( $response ) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/register?".
"access_token=$as_token&user_id=$localpart"
),
content_type => "application/json",
content => <<EOT
{
"type": "m.login.application_service",
"user": "$localpart"
}
EOT
)->get;
warn $response->as_string if ($response->code != 200);
}
my $http_server = Net::Async::HTTP::Server->new(
on_request => sub {
my $self = shift;
my ( $req ) = @_;
my $response;
my $path = uri_decode($req->path);
warn("request: $path");
if ($path =~ m#/users/\@(\+.*)#) {
# when queried about virtual users, auto-create them in the HS
my $localpart = $1;
create_virtual_user($localpart);
$response = HTTP::Response->new( 200 );
$response->add_content('{}');
$response->content_type( "application/json" );
}
elsif ($path =~ m#/transactions/(.*)#) {
my $event = JSON->new->decode($req->body);
print Dumper($event);
my $room_id = $event->{room_id};
my %dp = %{$CONFIG{'verto-dialog-params'}};
$dp{callID} = $bridgestate->{$room_id}->{callid};
if ($event->{type} eq 'm.room.membership') {
my $membership = $event->{content}->{membership};
my $state_key = $event->{state_key};
my $room_id = $event->{state_id};
if ($membership eq 'invite') {
# autojoin invites
my ( $response ) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/rooms/$room_id/join?".
"access_token=$as_token&user_id=$state_key"
),
content_type => "application/json",
content => "{}",
)->get;
warn $response->as_string if ($response->code != 200);
}
}
elsif ($event->{type} eq 'm.call.invite') {
my $room_id = $event->{room_id};
$bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
$bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
$bridgestate->{$room_id}->{sessid} = $sessid;
# $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
my $offer = $event->{content}->{offer}->{sdp};
# $bridgestate->{$room_id}->{gathered_candidates} = 0;
$roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
# no trickle ICE in verto apparently
my $f = send_verto_json_request("verto.invite", {
"sdp" => $offer,
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
# elsif ($event->{type} eq 'm.call.candidates') {
# # XXX: this could fire for both matrix->verto and verto->matrix calls
# # and races as it collects candidates. much better to just turn off
# # candidate gathering in the webclient entirely for now
#
# my $room_id = $event->{room_id};
# # XXX: compare call IDs
# if (!$bridgestate->{$room_id}->{gathered_candidates}) {
# $bridgestate->{$room_id}->{gathered_candidates} = 1;
# my $offer = $bridgestate->{$room_id}->{offer};
# my $candidate_block = "";
# foreach (@{$event->{content}->{candidates}}) {
# $candidate_block .= "a=" . $_->{candidate} . "\r\n";
# }
# # XXX: collate using the right m= line - for now assume audio call
# $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
#
# my $f = send_verto_json_request("verto.invite", {
# "sdp" => $offer,
# "dialogParams" => \%dp,
# "sessid" => $bridgestate->{$room_id}->{sessid},
# });
# $self->adopt_future($f);
# }
# else {
# # ignore them, as no trickle ICE, although we might as well
# # batch them up
# # foreach (@{$event->{content}->{candidates}}) {
# # push @{$bridgestate->{$room_id}->{candidates}}, $_;
# # }
# }
# }
elsif ($event->{type} eq 'm.call.answer') {
# grab the answer and relay it to verto as a verto.answer
my $room_id = $event->{room_id};
my $answer = $event->{content}->{answer}->{sdp};
my $f = send_verto_json_request("verto.answer", {
"sdp" => $answer,
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
elsif ($event->{type} eq 'm.call.hangup') {
my $room_id = $event->{room_id};
if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
my $f = send_verto_json_request("verto.bye", {
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
else {
warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
}
}
else {
warn "Unhandled event: $event->{type}";
}
$response = HTTP::Response->new( 200 );
$response->add_content('{}');
$response->content_type( "application/json" );
}
else {
warn "Unhandled path: $path";
$response = HTTP::Response->new( 404 );
}
$req->respond( $response );
},
);
$loop->add( $http_server );
$http_server->listen(
addr => { family => "inet", socktype => "stream", port => 8009 },
on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
);
my $bot_verto = Net::Async::WebSocket::Client->new(
on_frame => sub {
my ( $self, $frame ) = @_;
warn "[Verto] receiving $frame";
on_verto_json($frame);
},
);
$loop->add( $bot_verto );
my $verto_connecting = $loop->new_future;
$bot_verto->connect(
%{ $CONFIG{"verto-bot"} },
on_connected => sub {
warn("[Verto] connected to websocket");
if (not $verto_connecting->is_done) {
$verto_connecting->done($bot_verto);
send_verto_json_request("login", {
'login' => $CONFIG{'verto-dialog-params'}{'login'},
'passwd' => $CONFIG{'verto-config'}{'passwd'},
'sessid' => $sessid,
});
}
},
on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);
# die Dumper($verto_connecting);
my $as_url = $CONFIG{"matrix-bot"}->{as_url};
Future->needs_all(
$http->do_request(
method => "POST",
uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
content_type => "application/json",
content => <<EOT
{
"as_token": "$as_token",
"url": "$as_url",
"namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
}
EOT
)->then( sub{
my ($response) = (@_);
warn $response->as_string if ($response->code != 200);
return Future->done;
}),
$verto_connecting,
)->get;
$loop->attach_signal(
PIPE => sub { warn "pipe\n" }
);
$loop->attach_signal(
INT => sub { $loop->stop },
);
$loop->attach_signal(
TERM => sub { $loop->stop },
);
eval {
$loop->run;
} or my $e = $@;
die $e if $e;
exit 0;
{
my $json_id;
my $requests;
sub send_verto_json_request
{
$json_id ||= 1;
my ($method, $params) = @_;
my $json = {
jsonrpc => "2.0",
method => $method,
params => $params,
id => $json_id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
my $request = $loop->new_future;
$requests->{$json_id} = $request;
$json_id++;
return $request;
}
sub send_verto_json_response
{
my ($result, $id) = @_;
my $json = {
jsonrpc => "2.0",
result => $result,
id => $id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
}
sub on_verto_json
{
my $json = JSON->new->decode( $_[0] );
if ($json->{method}) {
if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
$json->{method} eq 'verto.media') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
if ($json->{params}->{sdp}) {
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.answer?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
answer => {
sdp => $json->{params}->{sdp},
type => "answer",
},
}),
)->then( sub {
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
}
elsif ($json->{method} eq 'verto.invite') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
my $room_id;
# create a virtual user for the caller if needed.
create_virtual_user($caller);
# create a room of form #peer-peer and invite the callee
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/createRoom?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
room_alias_name => $alias,
invite => [ $callee_user ],
}),
)->then( sub {
my ( $response ) = @_;
my $resp = JSON->new->decode($response->content);
$room_id = $resp->{room_id};
$roomid_by_callid->{$json->{params}->{callID}} = $room_id;
})->get;
# join it
my ($response) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/join/$room_id?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => '{}',
)->get;
$bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str();
$bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
$bridgestate->{$room_id}->{sessid} = $sessid;
# put the m.call.invite in there
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.invite?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
answer => {
sdp => $json->{params}->{sdp},
type => "offer",
},
}),
)->then( sub {
# acknowledge the verto
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
elsif ($json->{method} eq 'verto.bye') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
# put the m.call.hangup into the room
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.hangup?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
}),
)->then( sub {
# acknowledge the verto
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
else {
warn ("[Verto] unhandled method: " . $json->{method});
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
}
}
elsif ($json->{result}) {
$requests->{$json->{id}}->done($json->{result});
}
elsif ($json->{error}) {
$requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
}
}
}


@@ -7,6 +7,9 @@ matrix:
matrix-bot:
  user_id: '@vertobot:matrix.org'
  password: ''
  domain: 'matrix.org'
  as_url: 'http://localhost:8009'
  as_token: 'vertobot123'

verto-bot:
  host: webrtc.freeswitch.org


@@ -11,7 +11,9 @@ if [ -f $PID_FILE ]; then
exit 1
fi
find "$DIR" -name "*.log" -delete
find "$DIR" -name "*.db" -delete
for port in 8080 8081 8082; do
rm -rf $DIR/$port
rm -rf $DIR/media_store.$port
done
rm -rf $DIR/etc


@@ -8,37 +8,41 @@ cd "$DIR/.."
mkdir -p demo/etc
# Check the --no-rate-limit param
PARAMS=""
if [ $# -eq 1 ]; then
if [ $1 = "--no-rate-limit" ]; then
PARAMS="--rc-messages-per-second 1000 --rc-message-burst-count 1000"
fi
fi
export PYTHONPATH=$(readlink -f $(pwd))
echo $PYTHONPATH
for port in 8080 8081 8082; do
echo "Starting server on port $port... "
https_port=$((port + 400))
mkdir -p demo/$port
pushd demo/$port
#rm $DIR/etc/$port.config
python -m synapse.app.homeserver \
--generate-config \
--config-path "demo/etc/$port.config" \
-p "$https_port" \
--unsecure-port "$port" \
-H "localhost:$https_port" \
-f "$DIR/$port.log" \
-d "$DIR/$port.db" \
-D --pid-file "$DIR/$port.pid" \
--manhole $((port + 1000)) \
--tls-dh-params-path "demo/demo.tls.dh" \
--media-store-path "demo/media_store.$port" \
$PARAMS $SYNAPSE_PARAMS \
--config-path "$DIR/etc/$port.config" \
# Check script parameters
if [ $# -eq 1 ]; then
if [ $1 = "--no-rate-limit" ]; then
# Set high limits in config file to disable rate limiting
perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' $DIR/etc/$port.config
perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' $DIR/etc/$port.config
fi
fi
perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' $DIR/etc/$port.config
python -m synapse.app.homeserver \
--config-path "demo/etc/$port.config" \
--config-path "$DIR/etc/$port.config" \
-D \
-vv \
popd
done
cd "$CWD"

docs/CAPTCHA_SETUP Normal file

@@ -0,0 +1,31 @@
Captcha can be enabled for this home server. This file explains how to do that.
The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
Getting keys
------------
Requires a public/private key pair from:
https://developers.google.com/recaptcha/
Setting ReCaptcha Keys
----------------------
The keys are a config option in the home server's config file. If they are not
visible, you can generate them via --generate-config. Set the following value:
recaptcha_public_key: YOUR_PUBLIC_KEY
recaptcha_private_key: YOUR_PRIVATE_KEY
In addition, you MUST enable captchas via:
enable_registration_captcha: true
Configuring IP used for auth
----------------------------
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
IP address. This can be configured as an option on the home server like so:
captcha_ip_origin_is_x_forwarded: true
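To make the distinction concrete, here is a rough sketch (hypothetical helper,
not synapse's actual implementation) of how a server behind a proxy would pick
the IP to report to the ReCaptcha API when this option is set:

    def captcha_client_ip(headers, peer_ip, xff_enabled):
        # With captcha_ip_origin_is_x_forwarded: true, trust the first
        # (client) entry of the X-Forwarded-For header; otherwise use the
        # TCP peer address.
        if xff_enabled and headers.get("X-Forwarded-For"):
            return headers["X-Forwarded-For"].split(",")[0].strip()
        return peer_ip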


@@ -0,0 +1,36 @@
Registering an Application Service
==================================
The registration of new application services depends on the homeserver used.
In synapse, you need to create a new configuration file for your AS and add it
to the list specified under the ``app_service_config_files`` config
option in your synapse config.
For example:
.. code-block:: yaml
app_service_config_files:
- /home/matrix/.synapse/<your-AS>.yaml
The format of the AS configuration file is as follows:
.. code-block:: yaml
url: <base url of AS>
as_token: <token AS will add to requests to HS>
hs_token: <token HS will add to requests to AS>
sender_localpart: <localpart of AS user>
namespaces:
  users:  # List of users we're interested in
    - exclusive: <bool>
      regex: <regex>
    - ...
  aliases: []  # List of aliases we're interested in
  rooms: []  # List of room ids we're interested in
See the spec_ for further details on how application services work.
.. _spec: https://github.com/matrix-org/matrix-doc/blob/master/specification/25_application_service_api.rst#application-service-api
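To give a feel for the AS side of the API, here is a minimal sketch of a
python service that accepts the homeserver's event pushes (the HS delivers
transactions via ``PUT /transactions/<txn_id>`` authenticated with the
``hs_token``; the port and token below are illustrative assumptions):

.. code-block:: python

    import json
    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer

    HS_TOKEN = "hs_token_from_registration"  # placeholder

    class ASHandler(BaseHTTPRequestHandler):
        def do_PUT(self):
            # Reject pushes that do not carry our hs_token.
            if "access_token=" + HS_TOKEN not in self.path:
                self.send_response(403)
                self.end_headers()
                return
            body = self.rfile.read(int(self.headers["Content-Length"]))
            for event in json.loads(body).get("events", []):
                print event["type"], event.get("room_id")
            self.send_response(200)
            self.end_headers()
            self.wfile.write("{}")

    HTTPServer(("localhost", 8009), ASHandler).serve_forever()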

docs/metrics-howto.rst Normal file

@@ -0,0 +1,50 @@
How to monitor Synapse metrics using Prometheus
===============================================
1: Install prometheus:
Follow instructions at http://prometheus.io/docs/introduction/install/
2: Enable synapse metrics:
Simply setting a (local) port number will enable it. Pick a port.
prometheus itself defaults to 9090, so starting just above that for
locally monitored services seems reasonable. E.g. 9092:
Add to homeserver.yaml
metrics_port: 9092
Restart synapse
3: Check out synapse-prometheus-config
https://github.com/matrix-org/synapse-prometheus-config
4: Add ``synapse.html`` and ``synapse.rules``
The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
and the ``.rules`` file needs to be invoked somewhere in the main config
file. A symlink to each from the git checkout into the prometheus directory
might be easiest to ensure ``git pull`` keeps it updated.
5: Add a prometheus target for synapse
This is easiest if prometheus runs on the same machine as synapse, as it can
then just use localhost::
global: {
    rule_file: "synapse.rules"
}

job: {
    name: "synapse"
    target_group: {
        target: "http://localhost:9092/"
    }
}
6: Start prometheus::
./prometheus -config.file=prometheus.conf
7: Wait a few seconds for it to start and perform the first scrape,
then visit the console:
http://server-where-prometheus-runs:9090/consoles/synapse.html
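Before wiring up prometheus you can sanity-check from python that the
metrics listener is actually serving (a throwaway sketch, assuming
``metrics_port: 9092`` as above)::

    import urllib2

    body = urllib2.urlopen("http://localhost:9092/").read()
    print "\n".join(body.splitlines()[:10])  # first few exposed metrics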

docs/postgres.rst Normal file

@@ -0,0 +1,110 @@
Using Postgres
--------------
Set up database
===============
The PostgreSQL database used *must* have the correct encoding set, otherwise
it would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::
CREATE DATABASE synapse
    ENCODING 'UTF8'
    LC_COLLATE='C'
    LC_CTYPE='C'
    template=template0
    OWNER synapse_user;
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
Set up client
=============
Postgres support depends on the postgres python connector ``psycopg2``. In the
virtual env::
sudo apt-get install libpq-dev
pip install psycopg2
Synapse config
==============
When you are ready to start using PostgreSQL, add the following line to your
config file::
database:
    name: psycopg2
    args:
        user: <user>
        password: <pass>
        database: <db>
        host: <host>
        cp_min: 5
        cp_max: 10
All key/value pairs in ``args`` are passed to the ``psycopg2.connect(..)``
function, except keys beginning with ``cp_``, which are consumed by the twisted
adbapi connection pool.
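In code terms this amounts to roughly the following (a minimal sketch with
placeholder credentials; the port script described below builds its
connection pools the same way)::

    from twisted.enterprise import adbapi

    db_args = {
        "user": "synapse_user",       # placeholder values
        "password": "secretpassword",
        "database": "synapse",
        "host": "localhost",
        "cp_min": 5,
        "cp_max": 10,
    }

    # The cp_* keys configure the twisted adbapi connection pool itself;
    # everything else is passed straight through to psycopg2.connect(..).
    pool = adbapi.ConnectionPool("psycopg2", **db_args)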
Porting from SQLite
===================
Overview
~~~~~~~~
The script ``port_from_sqlite_to_postgres.py`` allows porting an existing
synapse server backed by SQLite to using PostgreSQL. This is done as a
two-phase process:
1. Copy the existing SQLite database to a separate location (while the server
is down) and running the port script against that offline database.
2. Shut down the server. Rerun the port script to port any data that has come
in since taking the first snapshot. Restart server against the PostgreSQL
database.
The port script is designed to be run repeatedly against newer snapshots of the
SQLite database file. This makes it safe to repeat step 1 if there was a delay
between taking the previous snapshot and being ready to do step 2.
It is safe to kill the port script at any time and restart it.
Using the port script
~~~~~~~~~~~~~~~~~~~~~
Firstly, shut down the currently running synapse server and copy its database
file (typically ``homeserver.db``) to another location. Once the copy is
complete, restart synapse. For instance::
./synctl stop
cp homeserver.db homeserver.db.snapshot
./synctl start
Assuming your new config file (as described in the section *Synapse config*)
is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::
python scripts/port_from_sqlite_to_postgres.py \
--sqlite-database homeserver.db.snapshot \
--postgres-config homeserver-postgres.yaml
The flag ``--curses`` displays a coloured curses progress UI.
If the script took a long time to complete, or time has otherwise passed since
the original snapshot was taken, repeat the previous steps with a newer
snapshot.
To complete the conversion shut down the synapse server and run the port
script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::
python scripts/port_from_sqlite_to_postgres.py \
--sqlite-database homeserver.db \
--postgres-config database_config.yaml
Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file using the ``database_config`` parameter (see
`Synapse Config`_) and restart synapse. Synapse should now be running against
PostgreSQL.


@@ -81,7 +81,7 @@ Your home server configuration file needs the following extra keys:
As an example, here is the relevant section of the config file for
matrix.org::
turn_uris: turn:turn.matrix.org:3478?transport=udp,turn:turn.matrix.org:3478?transport=tcp
turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
turn_user_lifetime: 86400000


@@ -0,0 +1,116 @@
# Convert v1 server signing keys and TLS certificates stored in a synapse
# postgres database into signed v2 key json, and insert the results into
# the server_keys_json table. Reads the homeserver config from argv[1].

import psycopg2
import yaml
import sys
import json
import time
import hashlib
from syutil.base64util import encode_base64
from syutil.crypto.signing_key import read_signing_keys
from syutil.crypto.jsonsign import sign_json
from syutil.jsonutil import encode_canonical_json


def select_v1_keys(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, verify_key in rows:
        results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
    return results


def select_v1_certs(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, tls_certificate in rows:
        results[server_name] = tls_certificate
    return results


def select_v2_json(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, key_json in rows:
        results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
    return results


def convert_v1_to_v2(server_name, valid_until, keys, certificate):
    return {
        "old_verify_keys": {},
        "server_name": server_name,
        "verify_keys": {
            key_id: {"key": key}
            for key_id, key in keys.items()
        },
        "valid_until_ts": valid_until,
        "tls_fingerprints": [fingerprint(certificate)],
    }


def fingerprint(certificate):
    finger = hashlib.sha256(certificate)
    return {"sha256": encode_base64(finger.digest())}


def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)
    for key_id in json["verify_keys"]:
        yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))


def main():
    config = yaml.load(open(sys.argv[1]))
    valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24

    server_name = config["server_name"]
    signing_key = read_signing_keys(open(config["signing_key_path"]))[0]

    database = config["database"]
    assert database["name"] == "psycopg2", "Can only convert for postgresql"
    args = database["args"]
    args.pop("cp_max")
    args.pop("cp_min")

    connection = psycopg2.connect(**args)
    keys = select_v1_keys(connection)
    certificates = select_v1_certs(connection)
    json = select_v2_json(connection)

    result = {}
    for server in keys:
        if not server in json:
            v2_json = convert_v1_to_v2(
                server, valid_until, keys[server], certificates[server]
            )
            v2_json = sign_json(v2_json, server_name, signing_key)
            result[server] = v2_json

    yaml.safe_dump(result, sys.stdout, default_flow_style=False)

    rows = list(
        row for server, json in result.items()
        for row in rows_v2(server, json)
    )

    cursor = connection.cursor()
    cursor.executemany(
        "INSERT INTO server_keys_json ("
        " server_name, key_id, from_server,"
        " ts_added_ms, ts_valid_until_ms, key_json"
        ") VALUES (%s, %s, %s, %s, %s, %s)",
        rows
    )
    connection.commit()


if __name__ == '__main__':
    main()


@@ -0,0 +1,760 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer, reactor
from twisted.enterprise import adbapi

from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.storage.engines import create_engine

import argparse
import curses
import logging
import sys
import time
import traceback
import yaml


logger = logging.getLogger("port_from_sqlite_to_postgres")


BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier"],
    "rooms": ["is_public"],
    "event_edges": ["is_state"],
    "presence_list": ["accepted"],
}


APPEND_ONLY_TABLES = [
    "event_content_hashes",
    "event_reference_hashes",
    "event_signatures",
    "event_edge_hashes",
    "events",
    "event_json",
    "state_events",
    "room_memberships",
    "feedback",
    "topics",
    "room_names",
    "rooms",
    "local_media_repository",
    "local_media_repository_thumbnails",
    "remote_media_cache",
    "remote_media_cache_thumbnails",
    "redactions",
    "event_edges",
    "event_auth",
    "received_transactions",
    "sent_transactions",
    "transaction_id_to_pdu",
    "users",
    "state_groups",
    "state_groups_state",
    "event_to_state_groups",
    "rejections",
]


end_error_exec_info = None


class Store(object):
    """This object is used to pull out some of the convenience API from the
    Storage layer.

    *All* database interactions should go through this object.
    """
    def __init__(self, db_pool, engine):
        self.db_pool = db_pool
        self.database_engine = engine

    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
    _simple_insert = SQLBaseStore.__dict__["_simple_insert"]

    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
    _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]

    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]

    _execute_and_decode = SQLBaseStore.__dict__["_execute_and_decode"]

    def runInteraction(self, desc, func, *args, **kwargs):
        def r(conn):
            try:
                i = 0
                N = 5
                while True:
                    try:
                        txn = conn.cursor()
                        return func(
                            LoggingTransaction(txn, desc, self.database_engine, []),
                            *args, **kwargs
                        )
                    except self.database_engine.module.DatabaseError as e:
                        if self.database_engine.is_deadlock(e):
                            logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
                            if i < N:
                                i += 1
                                conn.rollback()
                                continue
                        raise
            except Exception as e:
                logger.debug("[TXN FAIL] {%s} %s", desc, e)
                raise

        return self.db_pool.runWithConnection(r)

    def execute(self, f, *args, **kwargs):
        return self.runInteraction(f.__name__, f, *args, **kwargs)

    def execute_sql(self, sql, *args):
        def r(txn):
            txn.execute(sql, args)
            return txn.fetchall()
        return self.runInteraction("execute_sql", r)

    def insert_many_txn(self, txn, table, headers, rows):
        sql = "INSERT INTO %s (%s) VALUES (%s)" % (
            table,
            ", ".join(k for k in headers),
            ", ".join("%s" for _ in headers)
        )

        try:
            txn.executemany(sql, rows)
        except:
            logger.exception(
                "Failed to insert: %s",
                table,
            )
            raise


class Porter(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @defer.inlineCallbacks
    def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            next_chunk = yield self.postgres_store._simple_select_one_onecol(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcol="rowid",
                allow_none=True,
            )

            total_to_port = None
            if next_chunk is None:
                if table == "sent_transactions":
                    next_chunk, already_ported, total_to_port = (
                        yield self._setup_sent_transactions()
                    )
                else:
                    yield self.postgres_store._simple_insert(
                        table="port_from_sqlite3",
                        values={"table_name": table, "rowid": 1}
                    )

                    next_chunk = 1
                    already_ported = 0

            if total_to_port is None:
                already_ported, total_to_port = yield self._get_total_count_to_port(
                    table, next_chunk
                )
        else:
            def delete_all(txn):
                txn.execute(
                    "DELETE FROM port_from_sqlite3 WHERE table_name = %s",
                    (table,)
                )
                txn.execute("TRUNCATE %s CASCADE" % (table,))

            yield self.postgres_store.execute(delete_all)

            yield self.postgres_store._simple_insert(
                table="port_from_sqlite3",
                values={"table_name": table, "rowid": 0}
            )

            next_chunk = 1

            already_ported, total_to_port = yield self._get_total_count_to_port(
                table, next_chunk
            )

        defer.returnValue((table, already_ported, total_to_port, next_chunk))

    @defer.inlineCallbacks
    def handle_table(self, table, postgres_size, table_size, next_chunk):
        if not table_size:
            return

        self.progress.add_table(table, postgres_size, table_size)

        select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        while True:
            def r(txn):
                txn.execute(select, (next_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]

                return headers, rows

            headers, rows = yield self.sqlite_store.runInteraction("select", r)

            if rows:
                next_chunk = rows[-1][0] + 1

                self._convert_rows(table, headers, rows)

                def insert(txn):
                    self.postgres_store.insert_many_txn(
                        txn, table, headers[1:], rows
                    )

                    self.postgres_store._simple_update_one_txn(
                        txn,
                        table="port_from_sqlite3",
                        keyvalues={"table_name": table},
                        updatevalues={"rowid": next_chunk},
                    )

                yield self.postgres_store.execute(insert)

                postgres_size += len(rows)

                self.progress.update(table, postgres_size)
            else:
                return

    def setup_db(self, db_config, database_engine):
        db_conn = database_engine.module.connect(
            **{
                k: v for k, v in db_config.get("args", {}).items()
                if not k.startswith("cp_")
            }
        )

        database_engine.prepare_database(db_conn)

        db_conn.commit()

    @defer.inlineCallbacks
    def run(self):
        try:
            sqlite_db_pool = adbapi.ConnectionPool(
                self.sqlite_config["name"],
                **self.sqlite_config["args"]
            )

            postgres_db_pool = adbapi.ConnectionPool(
                self.postgres_config["name"],
                **self.postgres_config["args"]
            )

            sqlite_engine = create_engine("sqlite3")
            postgres_engine = create_engine("psycopg2")

            self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
            self.postgres_store = Store(postgres_db_pool, postgres_engine)

            yield self.postgres_store.execute(
                postgres_engine.check_database
            )

            # Step 1. Set up databases.
            self.progress.set_state("Preparing SQLite3")
            self.setup_db(self.sqlite_config, sqlite_engine)

            self.progress.set_state("Preparing PostgreSQL")
            self.setup_db(self.postgres_config, postgres_engine)

            # Step 2. Get tables.
            self.progress.set_state("Fetching tables")
            sqlite_tables = yield self.sqlite_store._simple_select_onecol(
                table="sqlite_master",
                keyvalues={
                    "type": "table",
                },
                retcol="name",
            )

            postgres_tables = yield self.postgres_store._simple_select_onecol(
                table="information_schema.tables",
                keyvalues={
                    "table_schema": "public",
                },
                retcol="distinct table_name",
            )

            tables = set(sqlite_tables) & set(postgres_tables)

            self.progress.set_state("Creating tables")

            logger.info("Found %d tables", len(tables))

            def create_port_table(txn):
                txn.execute(
                    "CREATE TABLE port_from_sqlite3 ("
                    " table_name varchar(100) NOT NULL UNIQUE,"
                    " rowid bigint NOT NULL"
                    ")"
                )

            try:
                yield self.postgres_store.runInteraction(
                    "create_port_table", create_port_table
                )
            except Exception as e:
                logger.info("Failed to create port table: %s", e)

            self.progress.set_state("Setting up")

            # Set up tables.
            setup_res = yield defer.gatherResults(
                [
                    self.setup_table(table)
                    for table in tables
                    if table not in ["schema_version", "applied_schema_deltas"]
                    and not table.startswith("sqlite_")
                ],
                consumeErrors=True,
            )

            # Process tables.
            yield defer.gatherResults(
                [
                    self.handle_table(*res)
                    for res in setup_res
                ],
                consumeErrors=True,
            )

            self.progress.done()
        except:
            global end_error_exec_info
            end_error_exec_info = sys.exc_info()
            logger.exception("")
        finally:
            reactor.stop()

    def _convert_rows(self, table, headers, rows):
        bool_col_names = BOOLEAN_COLUMNS.get(table, [])

        bool_cols = [
            i for i, h in enumerate(headers) if h in bool_col_names
        ]

        def conv(j, col):
            if j in bool_cols:
                return bool(col)
            return col

        for i, row in enumerate(rows):
            rows[i] = tuple(
                conv(j, col)
                for j, col in enumerate(row)
                if j > 0
            )

    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
        # Only save things from the last day
        yesterday = int(time.time()*1000) - 86400000

        # And save the max transaction id from each destination
        select = (
            "SELECT rowid, * FROM sent_transactions WHERE rowid IN ("
            "SELECT max(rowid) FROM sent_transactions"
            " GROUP BY destination"
            ")"
        )

        def r(txn):
            txn.execute(select)
            rows = txn.fetchall()
            headers = [column[0] for column in txn.description]

            ts_ind = headers.index('ts')

            return headers, [r for r in rows if r[ts_ind] < yesterday]

        headers, rows = yield self.sqlite_store.runInteraction(
            "select", r,
        )

        self._convert_rows("sent_transactions", headers, rows)

        inserted_rows = len(rows)
        max_inserted_rowid = max(r[0] for r in rows)

        def insert(txn):
            self.postgres_store.insert_many_txn(
                txn, "sent_transactions", headers[1:], rows
            )

        yield self.postgres_store.execute(insert)

        def get_start_id(txn):
            txn.execute(
                "SELECT rowid FROM sent_transactions WHERE ts >= ?"
                " ORDER BY rowid ASC LIMIT 1",
                (yesterday,)
            )

            rows = txn.fetchall()
            if rows:
                return rows[0][0]
            else:
                return 1

        next_chunk = yield self.sqlite_store.execute(get_start_id)
        next_chunk = max(max_inserted_rowid + 1, next_chunk)

        yield self.postgres_store._simple_insert(
            table="port_from_sqlite3",
            values={"table_name": "sent_transactions", "rowid": next_chunk}
        )

        def get_sent_table_size(txn):
            txn.execute(
                "SELECT count(*) FROM sent_transactions"
                " WHERE ts >= ?",
                (yesterday,)
            )
            size, = txn.fetchone()
            return int(size)

        remaining_count = yield self.sqlite_store.execute(
            get_sent_table_size
        )

        total_count = remaining_count + inserted_rows

        defer.returnValue((next_chunk, inserted_rows, total_count))

    @defer.inlineCallbacks
    def _get_remaining_count_to_port(self, table, next_chunk):
        rows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
            next_chunk,
        )

        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_already_ported_count(self, table):
        rows = yield self.postgres_store.execute_sql(
            "SELECT count(*) FROM %s" % (table,),
        )

        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_total_count_to_port(self, table, next_chunk):
        remaining, done = yield defer.gatherResults(
            [
                self._get_remaining_count_to_port(table, next_chunk),
                self._get_already_ported_count(table),
            ],
            consumeErrors=True,
        )

        remaining = int(remaining) if remaining else 0
        done = int(done) if done else 0

        defer.returnValue((done, remaining + done))


##############################################
###### The following is simply UI stuff ######
##############################################


class Progress(object):
    """Used to report progress of the port
    """
    def __init__(self):
        self.tables = {}

        self.start_time = int(time.time())

    def add_table(self, table, cur, size):
        self.tables[table] = {
            "start": cur,
            "num_done": cur,
            "total": size,
            "perc": int(cur * 100 / size),
        }

    def update(self, table, num_done):
        data = self.tables[table]
        data["num_done"] = num_done
        data["perc"] = int(num_done * 100 / data["total"])

    def done(self):
        pass


class CursesProgress(Progress):
    """Reports progress to a curses window
    """
    def __init__(self, stdscr):
        self.stdscr = stdscr

        curses.use_default_colors()
        curses.curs_set(0)

        curses.init_pair(1, curses.COLOR_RED, -1)
        curses.init_pair(2, curses.COLOR_GREEN, -1)

        self.last_update = 0

        self.finished = False

        self.total_processed = 0
        self.total_remaining = 0

        super(CursesProgress, self).__init__()

    def update(self, table, num_done):
        super(CursesProgress, self).update(table, num_done)

        self.total_processed = 0
        self.total_remaining = 0
        for table, data in self.tables.items():
            self.total_processed += data["num_done"] - data["start"]
            self.total_remaining += data["total"] - data["num_done"]

        self.render()

    def render(self, force=False):
        now = time.time()

        if not force and now - self.last_update < 0.2:
            # reactor.callLater(1, self.render)
            return

        self.stdscr.clear()

        rows, cols = self.stdscr.getmaxyx()

        duration = int(now) - int(self.start_time)

        minutes, seconds = divmod(duration, 60)
duration_str = '%02dm %02ds' % (minutes, seconds,)
if self.finished:
status = "Time spent: %s (Done!)" % (duration_str,)
else:
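# Linear ETA: scale elapsed time by the remaining/processed ratio.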
if self.total_processed > 0:
left = float(self.total_remaining) / self.total_processed
est_remaining = (int(now) - self.start_time) * left
est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
else:
est_remaining_str = "Unknown"
status = (
"Time spent: %s (est. remaining: %s)"
% (duration_str, est_remaining_str,)
)
self.stdscr.addstr(
0, 0,
status,
curses.A_BOLD,
)
max_len = max([len(t) for t in self.tables.keys()])
left_margin = 5
middle_space = 1
items = self.tables.items()
items.sort(
key=lambda i: (i[1]["perc"], i[0]),
)
for i, (table, data) in enumerate(items):
if i + 2 >= rows:
break
perc = data["perc"]
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
self.stdscr.addstr(
i+2, left_margin + max_len - len(table),
table,
curses.A_BOLD | color,
)
size = 20
progress = "[%s%s]" % (
"#" * int(perc*size/100),
" " * (size - int(perc*size/100)),
)
self.stdscr.addstr(
i+2, left_margin + max_len + middle_space,
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
)
if self.finished:
self.stdscr.addstr(
rows-1, 0,
"Press any key to exit...",
)
self.stdscr.refresh()
self.last_update = time.time()
def done(self):
self.finished = True
self.render(True)
self.stdscr.getch()
def set_state(self, state):
self.stdscr.clear()
self.stdscr.addstr(
0, 0,
state + "...",
curses.A_BOLD,
)
self.stdscr.refresh()
class TerminalProgress(Progress):
"""Just prints progress to the terminal
"""
def update(self, table, num_done):
super(TerminalProgress, self).update(table, num_done)
data = self.tables[table]
print "%s: %d%% (%d/%d)" % (
table, data["perc"],
data["num_done"], data["total"],
)
def set_state(self, state):
print state + "..."
##############################################
##############################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A script to port an existing synapse SQLite database to"
" a new PostgreSQL database."
)
parser.add_argument("-v", action='store_true')
parser.add_argument(
"--sqlite-database", required=True,
help="The snapshot of the SQLite database file. This must not be"
" currently used by a running synapse server"
)
parser.add_argument(
"--postgres-config", type=argparse.FileType('r'), required=True,
help="The database config file for the PostgreSQL database"
)
parser.add_argument(
"--curses", action='store_true',
help="display a curses based progress UI"
)
parser.add_argument(
"--batch-size", type=int, default=1000,
help="The number of rows to select from the SQLite table each"
" iteration [default=1000]",
)
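# Illustrative invocation (the file names here are examples, not defaults):
#
#   python port_from_sqlite_to_postgres.py \
#       --sqlite-database homeserver.db.snapshot \
#       --postgres-config database_config.yaml \
#       --curses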
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
}
if args.curses:
logging_config["filename"] = "port-synapse.log"
logging.basicConfig(**logging_config)
sqlite_config = {
"name": "sqlite3",
"args": {
"database": args.sqlite_database,
"cp_min": 1,
"cp_max": 1,
"check_same_thread": False,
},
}
postgres_config = yaml.safe_load(args.postgres_config)
if "database" in postgres_config:
postgres_config = postgres_config["database"]
if "name" not in postgres_config:
sys.stderr.write("Malformed database config: no 'name'")
sys.exit(2)
if postgres_config["name"] != "psycopg2":
sys.stderr.write("Database must use 'psycopg2' connector.")
sys.exit(3)
def start(stdscr=None):
if stdscr:
progress = CursesProgress(stdscr)
else:
progress = TerminalProgress()
porter = Porter(
sqlite_config=sqlite_config,
postgres_config=postgres_config,
progress=progress,
batch_size=args.batch_size,
)
reactor.callWhenRunning(porter.run)
reactor.run()
if args.curses:
curses.wrapper(start)
else:
start()
if end_error_exec_info:
exc_type, exc_value, exc_traceback = end_error_exec_info
traceback.print_exception(exc_type, exc_value, exc_traceback)

scripts/register_new_matrix_user Executable file

@@ -0,0 +1,154 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import getpass
import hashlib
import hmac
import json
import sys
import urllib2
import yaml
def request_registration(user, password, server_location, shared_secret):
mac = hmac.new(
key=shared_secret,
msg=user,
digestmod=hashlib.sha1,
).hexdigest()
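# The MAC is HMAC-SHA1, keyed with the homeserver's
# registration_shared_secret, over the new user's localpart, proving to
# the server that the requester knows the shared secret.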
data = {
"user": user,
"password": password,
"mac": mac,
"type": "org.matrix.login.shared_secret",
}
server_location = server_location.rstrip("/")
print "Sending registration request..."
req = urllib2.Request(
"%s/_matrix/client/api/v1/register" % (server_location,),
data=json.dumps(data),
headers={'Content-Type': 'application/json'}
)
try:
if sys.version_info[:3] >= (2, 7, 9):
# As of version 2.7.9, urllib2 now checks SSL certs
import ssl
f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
else:
f = urllib2.urlopen(req)
f.read()
f.close()
print "Success."
except urllib2.HTTPError as e:
print "ERROR! Received %d %s" % (e.code, e.reason,)
if 400 <= e.code < 500:
if e.info().type == "application/json":
resp = json.load(e)
if "error" in resp:
print resp["error"]
sys.exit(1)
def register_new_user(user, password, server_location, shared_secret):
if not user:
try:
default_user = getpass.getuser()
except:
default_user = None
if default_user:
user = raw_input("New user localpart [%s]: " % (default_user,))
if not user:
user = default_user
else:
user = raw_input("New user localpart: ")
if not user:
print "Invalid user name"
sys.exit(1)
if not password:
password = getpass.getpass("Password: ")
if not password:
print "Password cannot be blank."
sys.exit(1)
confirm_password = getpass.getpass("Confirm password: ")
if password != confirm_password:
print "Passwords do not match"
sys.exit(1)
request_registration(user, password, server_location, shared_secret)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Used to register new users with a given home server when"
" registration has been disabled. The home server must be"
" configured with the 'registration_shared_secret' option"
" set.",
)
parser.add_argument(
"-u", "--user",
default=None,
help="Local part of the new user. Will prompt if omitted.",
)
parser.add_argument(
"-p", "--password",
default=None,
help="New password for user. Will prompt if omitted.",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-c", "--config",
type=argparse.FileType('r'),
help="Path to server config file. Used to read in shared secret.",
)
group.add_argument(
"-k", "--shared-secret",
help="Shared secret as defined in server config file.",
)
parser.add_argument(
"server_url",
default="https://localhost:8448",
nargs='?',
help="URL to use to talk to the home server. Defaults to "
" 'https://localhost:8448'.",
)
args = parser.parse_args()
if "config" in args and args.config:
config = yaml.safe_load(args.config)
secret = config.get("registration_shared_secret", None)
if not secret:
print "No 'registration_shared_secret' defined in config."
sys.exit(1)
else:
secret = args.shared_secret
register_new_user(args.user, args.password, args.server_url, secret)
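# Illustrative usage (assuming homeserver.yaml defines
# registration_shared_secret):
#
#   register_new_matrix_user -c homeserver.yaml https://localhost:8448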

scripts/upgrade_db_to_v0.6.0.py Normal file → Executable file

@@ -1,4 +1,4 @@
#!/usr/bin/env python
from synapse.storage import SCHEMA_VERSION, read_schema
from synapse.storage._base import SQLBaseStore
from synapse.storage.signatures import SignatureStore


@@ -16,3 +16,6 @@ ignore =
docs/*
pylint.cfg
tox.ini
[flake8]
max-line-length = 90


@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from setuptools import setup, find_packages
@@ -45,9 +46,9 @@ setup(
version=version,
packages=find_packages(exclude=["tests", "tests.*"]),
description="Reference Synapse Home Server",
install_requires=dependencies["REQUIREMENTS"].keys(),
install_requires=dependencies['requirements'](include_conditional=True).keys(),
setup_requires=[
"Twisted==14.0.2", # Here to override setuptools_trial's dependency on Twisted>=2.4.0
"Twisted>=15.1.0", # Here to override setuptools_trial's dependency on Twisted>=2.4.0
"setuptools_trial",
"mock"
],
@@ -55,5 +56,5 @@ setup(
include_package_data=True,
zip_safe=False,
long_description=long_description,
scripts=["synctl"],
scripts=["synctl"] + glob.glob("scripts/*"),
)


@@ -0,0 +1,32 @@
<html>
<head>
<title> Registration </title>
<meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
<link rel="stylesheet" href="style.css">
<script src="js/jquery-2.1.3.min.js"></script>
<script src="js/recaptcha_ajax.js"></script>
<script src="register_config.js"></script>
<script src="js/register.js"></script>
</head>
<body onload="matrixRegistration.onLoad()">
<form id="registrationForm" onsubmit="matrixRegistration.signUp(); return false;">
<div>
Create account:<br/>
<div style="text-align: center">
<input id="desired_user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
<br/>
<input id="pwd1" size="32" type="password" placeholder="Type a password"/>
<br/>
<input id="pwd2" size="32" type="password" placeholder="Confirm your password"/>
<br/>
<span id="feedback" style="color: #f00"></span>
<br/>
<div id="regcaptcha"></div>
<button type="submit" style="margin: 10px">Sign up</button>
</div>
</div>
</form>
</body>
</html>



@@ -0,0 +1,117 @@
window.matrixRegistration = {
endpoint: location.origin + "/_matrix/client/api/v1/register"
};
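// Flow sketch: GET the register endpoint to discover the advertised login
// flows; if m.login.recaptcha is among them, complete the captcha stage
// first, then finish with an m.login.password stage using the session id.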
var setupCaptcha = function() {
if (!window.matrixRegistrationConfig) {
return;
}
$.get(matrixRegistration.endpoint, function(response) {
var serverExpectsCaptcha = false;
for (var i=0; i<response.flows.length; i++) {
var flow = response.flows[i];
if ("m.login.recaptcha" === flow.type) {
serverExpectsCaptcha = true;
break;
}
}
if (!serverExpectsCaptcha) {
console.log("This server does not require a captcha.");
return;
}
console.log("Setting up ReCaptcha for "+matrixRegistration.endpoint);
var public_key = window.matrixRegistrationConfig.recaptcha_public_key;
if (public_key === undefined) {
console.error("No public key defined for captcha!");
setFeedbackString("Misconfigured captcha for server. Contact server admin.");
return;
}
Recaptcha.create(public_key,
"regcaptcha",
{
theme: "red",
callback: Recaptcha.focus_response_field
});
window.matrixRegistration.isUsingRecaptcha = true;
}).error(errorFunc);
};
var submitCaptcha = function(user, pwd) {
var challengeToken = Recaptcha.get_challenge();
var captchaEntry = Recaptcha.get_response();
var data = {
type: "m.login.recaptcha",
challenge: challengeToken,
response: captchaEntry
};
console.log("Submitting captcha");
$.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
console.log("Success -> "+JSON.stringify(response));
submitPassword(user, pwd, response.session);
}).error(function(err) {
Recaptcha.reload();
errorFunc(err);
});
};
var submitPassword = function(user, pwd, session) {
console.log("Registering...");
var data = {
type: "m.login.password",
user: user,
password: pwd,
session: session
};
$.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
matrixRegistration.onRegistered(
response.home_server, response.user_id, response.access_token
);
}).error(errorFunc);
};
var errorFunc = function(err) {
if (err.responseJSON && err.responseJSON.error) {
setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")");
}
else {
setFeedbackString("Request failed: " + err.status);
}
};
var setFeedbackString = function(text) {
$("#feedback").text(text);
};
matrixRegistration.onLoad = function() {
setupCaptcha();
};
matrixRegistration.signUp = function() {
var user = $("#desired_user_id").val();
if (user.length == 0) {
setFeedbackString("Must specify a username.");
return;
}
var pwd1 = $("#pwd1").val();
var pwd2 = $("#pwd2").val();
if (pwd1.length < 6) {
setFeedbackString("Password: min. 6 characters.");
return;
}
if (pwd1 != pwd2) {
setFeedbackString("Passwords do not match.");
return;
}
if (window.matrixRegistration.isUsingRecaptcha) {
submitCaptcha(user, pwd1);
}
else {
submitPassword(user, pwd1);
}
};
matrixRegistration.onRegistered = function(hs_url, user_id, access_token) {
// clobber this function
console.log("onRegistered - This function should be replaced to proceed.");
};


@@ -0,0 +1,3 @@
window.matrixRegistrationConfig = {
recaptcha_public_key: "YOUR_PUBLIC_KEY"
};


@@ -0,0 +1,60 @@
html {
height: 100%;
}
body {
height: 100%;
font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif;
font-size: 12pt;
margin: 0px;
}
h1 {
font-size: 20pt;
}
a:link { color: #666; }
a:visited { color: #666; }
a:hover { color: #000; }
a:active { color: #000; }
input {
width: 100%
}
textarea, input {
font-family: inherit;
font-size: inherit;
}
.smallPrint {
color: #888;
font-size: 9pt ! important;
font-style: italic ! important;
}
#recaptcha_area {
margin: auto
}
.g-recaptcha div {
margin: auto;
}
#registrationForm {
text-align: left;
padding: 5px;
margin-bottom: 40px;
display: inline-block;
-webkit-border-radius: 10px;
-moz-border-radius: 10px;
border-radius: 10px;
-webkit-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
-moz-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
background-color: #f8f8f8;
border: 1px #ccc solid;
}


@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.7.1"
__version__ = "0.9.3"


@@ -18,9 +18,8 @@
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, StoreError, Codes, SynapseError
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor
from synapse.types import UserID, ClientInfo
import logging
@@ -28,16 +27,28 @@ import logging
logger = logging.getLogger(__name__)
AuthEventTypes = (
EventTypes.Create, EventTypes.Member, EventTypes.PowerLevels,
EventTypes.JoinRules, EventTypes.RoomHistoryVisibility,
)
class Auth(object):
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
def check(self, event, auth_events):
""" Checks if this event is correctly authed.
Args:
event: the event being checked.
auth_events (dict: event-key -> event): the existing room state.
Returns:
True if the auth checks pass.
"""
@@ -58,7 +69,10 @@ class Auth(object):
if event.type == EventTypes.Aliases:
return True
logger.debug("Auth events: %s", auth_events)
logger.debug(
"Auth events: %s",
[a.event_id for a in auth_events.values()]
)
if event.type == EventTypes.Member:
allowed = self.is_membership_change_allowed(
@@ -166,6 +180,7 @@ class Auth(object):
target = auth_events.get(key)
target_in_room = target and target.membership == Membership.JOIN
target_banned = target and target.membership == Membership.BAN
key = (EventTypes.JoinRules, "", )
join_rule_event = auth_events.get(key)
@@ -176,24 +191,20 @@ class Auth(object):
else:
join_rule = JoinRules.INVITE
user_level = self._get_power_level_from_event_state(
event,
event.user_id,
auth_events,
user_level = self._get_user_power_level(event.user_id, auth_events)
target_level = self._get_user_power_level(
target_user_id, auth_events
)
ban_level, kick_level, redact_level = (
self._get_ops_level_from_event_state(
event,
auth_events,
)
)
# FIXME (erikj): What should we do here as the default?
ban_level = self._get_named_level(auth_events, "ban", 50)
logger.debug(
"is_membership_change_allowed: %s",
{
"caller_in_room": caller_in_room,
"caller_invited": caller_invited,
"target_banned": target_banned,
"target_in_room": target_in_room,
"membership": membership,
"join_rule": join_rule,
@@ -202,25 +213,41 @@ class Auth(object):
}
)
if Membership.INVITE == membership:
# TODO (erikj): We should probably handle this more intelligently
# PRIVATE join rules.
# Invites are valid iff caller is in the room and target isn't.
if Membership.JOIN != membership:
# JOIN is the only action you can perform if you're not in the room
if not caller_in_room: # caller isn't joined
raise AuthError(
403,
"%s not in room %s." % (event.user_id, event.room_id,)
)
if Membership.INVITE == membership:
# TODO (erikj): We should probably handle this more intelligently
# PRIVATE join rules.
# Invites are valid iff caller is in the room and target isn't.
if target_banned:
raise AuthError(
403, "%s is banned from the room" % (target_user_id,)
)
elif target_in_room: # the target is already in the room.
raise AuthError(403, "%s is already in the room." %
target_user_id)
else:
invite_level = self._get_named_level(auth_events, "invite", 0)
if user_level < invite_level:
raise AuthError(
403, "You cannot invite user %s." % target_user_id
)
elif Membership.JOIN == membership:
# Joins are valid iff caller == target and they were:
# invited: They are accepting the invitation
# joined: It's a NOOP
if event.user_id != target_user_id:
raise AuthError(403, "Cannot force another user to join.")
elif target_banned:
raise AuthError(403, "You are banned from this room")
elif join_rule == JoinRules.PUBLIC:
pass
elif join_rule == JoinRules.INVITE:
@@ -232,63 +259,61 @@ class Auth(object):
raise AuthError(403, "You are not allowed to join this room")
elif Membership.LEAVE == membership:
# TODO (erikj): Implement kicks.
if not caller_in_room: # trying to leave a room you aren't joined
if target_banned and user_level < ban_level:
raise AuthError(
403,
"%s not in room %s." % (target_user_id, event.room_id,)
403, "You cannot unban user &s." % (target_user_id,)
)
elif target_user_id != event.user_id:
if kick_level:
kick_level = int(kick_level)
else:
kick_level = 50 # FIXME (erikj): What should we do here?
kick_level = self._get_named_level(auth_events, "kick", 50)
if user_level < kick_level:
if user_level < kick_level or user_level <= target_level:
raise AuthError(
403, "You cannot kick user %s." % target_user_id
)
elif Membership.BAN == membership:
if ban_level:
ban_level = int(ban_level)
else:
ban_level = 50 # FIXME (erikj): What should we do here?
if user_level < ban_level:
if user_level < ban_level or user_level <= target_level:
raise AuthError(403, "You don't have permission to ban")
else:
raise AuthError(500, "Unknown membership %s" % membership)
return True
def _get_power_level_from_event_state(self, event, user_id, auth_events):
def _get_power_level_event(self, auth_events):
key = (EventTypes.PowerLevels, "", )
power_level_event = auth_events.get(key)
level = None
return auth_events.get(key)
def _get_user_power_level(self, user_id, auth_events):
power_level_event = self._get_power_level_event(auth_events)
if power_level_event:
level = power_level_event.content.get("users", {}).get(user_id)
if not level:
level = power_level_event.content.get("users_default", 0)
if level is None:
return 0
else:
return int(level)
else:
key = (EventTypes.Create, "", )
create_event = auth_events.get(key)
if (create_event is not None and
create_event.content["creator"] == user_id):
return 100
else:
return 0
return level
def _get_named_level(self, auth_events, name, default):
power_level_event = self._get_power_level_event(auth_events)
def _get_ops_level_from_event_state(self, event, auth_events):
key = (EventTypes.PowerLevels, "", )
power_level_event = auth_events.get(key)
if not power_level_event:
return default
if power_level_event:
return (
power_level_event.content.get("ban", 50),
power_level_event.content.get("kick", 50),
power_level_event.content.get("redact", 50),
)
return None, None, None,
level = power_level_event.content.get(name, None)
if level is not None:
return int(level)
else:
return default
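# These helpers read m.room.power_levels content shaped like the
# following (illustrative values):
#   {"users": {"@admin:example.com": 100}, "users_default": 0,
#    "ban": 50, "kick": 50, "redact": 50, "invite": 0}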
@defer.inlineCallbacks
def get_user_by_req(self, request):
@@ -299,7 +324,7 @@ class Auth(object):
Returns:
tuple : of UserID and device string:
User ID object of the user making the request
Client ID object of the client instance the user is using
ClientInfo object of the client instance the user is using
Raises:
AuthError if no user by that token exists or the token is invalid.
"""
@@ -327,12 +352,14 @@ class Auth(object):
if not user_id:
raise KeyError
request.authenticated_entity = user_id
defer.returnValue(
(UserID.from_string(user_id), ClientInfo("", ""))
)
return
except KeyError:
pass # normal users won't have this query parameter set
pass # normal users won't have the user_id query parameter set.
user_info = yield self.get_user_by_token(access_token)
user = user_info["user"]
@@ -345,7 +372,7 @@ class Auth(object):
default=[""]
)[0]
if user and access_token and ip_addr:
yield self.store.insert_client_ip(
self.store.insert_client_ip(
user=user,
access_token=access_token,
device_id=user_info["device_id"],
@@ -353,9 +380,14 @@ class Auth(object):
user_agent=user_agent
)
request.authenticated_entity = user.to_string()
defer.returnValue((user, ClientInfo(device_id, token_id)))
except KeyError:
raise AuthError(403, "Missing access token.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
errcode=Codes.MISSING_TOKEN
)
@defer.inlineCallbacks
def get_user_by_token(self, token):
@@ -369,10 +401,12 @@ class Auth(object):
Raises:
AuthError if no user by that token exists or the token is invalid.
"""
try:
ret = yield self.store.get_user_by_token(token=token)
ret = yield self.store.get_user_by_token(token)
if not ret:
raise StoreError(400, "Unknown token")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
)
user_info = {
"admin": bool(ret.get("admin", False)),
"device_id": ret.get("device_id"),
@@ -381,9 +415,6 @@ class Auth(object):
}
defer.returnValue(user_info)
except StoreError:
raise AuthError(403, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN)
@defer.inlineCallbacks
def get_appservice_by_req(self, request):
@@ -391,19 +422,23 @@ class Auth(object):
token = request.args["access_token"][0]
service = yield self.store.get_app_service_by_token(token)
if not service:
raise AuthError(403, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN)
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
)
request.authenticated_entity = service.sender
defer.returnValue(service)
except KeyError:
raise AuthError(403, "Missing access token.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
)
def is_server_admin(self, user):
return self.store.is_server_admin(user)
@defer.inlineCallbacks
def add_auth_events(self, builder, context):
yield run_on_reactor()
auth_ids = self.compute_auth_events(builder, context.current_state)
auth_events_entries = yield self.store.add_event_hashes(
@@ -412,12 +447,6 @@ class Auth(object):
builder.auth_events = auth_events_entries
context.auth_events = {
k: v
for k, v in context.current_state.items()
if v.event_id in auth_ids
}
def compute_auth_events(self, event, current_state):
if event.type == EventTypes.Create:
return []
@@ -474,7 +503,7 @@ class Auth(object):
send_level = send_level_event.content.get("events", {}).get(
event.type
)
if not send_level:
if send_level is None:
if hasattr(event, "state_key"):
send_level = send_level_event.content.get(
"state_default", 50
@@ -489,16 +518,7 @@ class Auth(object):
else:
send_level = 0
user_level = self._get_power_level_from_event_state(
event,
event.user_id,
auth_events,
)
if user_level:
user_level = int(user_level)
else:
user_level = 0
user_level = self._get_user_power_level(event.user_id, auth_events)
if user_level < send_level:
raise AuthError(
@@ -509,7 +529,6 @@ class Auth(object):
# Check state_key
if hasattr(event, "state_key"):
if not event.state_key.startswith("_"):
if event.state_key.startswith("@"):
if event.state_key != event.user_id:
raise AuthError(
@@ -530,16 +549,9 @@ class Auth(object):
return True
def _check_redaction(self, event, auth_events):
user_level = self._get_power_level_from_event_state(
event,
event.user_id,
auth_events,
)
user_level = self._get_user_power_level(event.user_id, auth_events)
_, _, redact_level = self._get_ops_level_from_event_state(
event,
auth_events,
)
redact_level = self._get_named_level(auth_events, "redact", 50)
if user_level < redact_level:
raise AuthError(
@@ -567,32 +579,30 @@ class Auth(object):
if not current_state:
return
user_level = self._get_power_level_from_event_state(
event,
event.user_id,
auth_events,
)
user_level = self._get_user_power_level(event.user_id, auth_events)
# Check other levels:
levels_to_check = [
("users_default", []),
("events_default", []),
("ban", []),
("redact", []),
("kick", []),
("users_default", None),
("events_default", None),
("state_default", None),
("ban", None),
("redact", None),
("kick", None),
("invite", None),
]
old_list = current_state.content.get("users")
for user in set(old_list.keys() + user_list.keys()):
levels_to_check.append(
(user, ["users"])
(user, "users")
)
old_list = current_state.content.get("events")
new_list = event.content.get("events")
for ev_id in set(old_list.keys() + new_list.keys()):
levels_to_check.append(
(ev_id, ["events"])
(ev_id, "events")
)
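# Each entry is (level name, sub-dict key or None): None means a
# top-level key of the power_levels content; "users"/"events" means a
# key inside that sub-dict.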
old_state = current_state.content
@@ -600,12 +610,10 @@ class Auth(object):
for level_to_check, dir in levels_to_check:
old_loc = old_state
for d in dir:
old_loc = old_loc.get(d, {})
new_loc = new_state
for d in dir:
new_loc = new_loc.get(d, {})
if dir:
old_loc = old_loc.get(dir, {})
new_loc = new_loc.get(dir, {})
if level_to_check in old_loc:
old_level = int(old_loc[level_to_check])
@@ -621,6 +629,14 @@ class Auth(object):
if new_level == old_level:
continue
if dir == "users" and level_to_check != event.user_id:
if old_level == user_level:
raise AuthError(
403,
"You don't have permission to remove ops level equal "
"to your own"
)
if old_level > user_level or new_level > user_level:
raise AuthError(
403,


@@ -59,7 +59,11 @@ class LoginType(object):
EMAIL_URL = u"m.login.email.url"
EMAIL_IDENTITY = u"m.login.email.identity"
RECAPTCHA = u"m.login.recaptcha"
DUMMY = u"m.login.dummy"
# Only for C/S API v1
APPLICATION_SERVICE = u"m.login.application_service"
SHARED_SECRET = u"org.matrix.login.shared_secret"
class EventTypes(object):
@@ -71,6 +75,8 @@ class EventTypes(object):
Redaction = "m.room.redaction"
Feedback = "m.room.message.feedback"
RoomHistoryVisibility = "m.room.history_visibility"
# These are used for validation
Message = "m.room.message"
Topic = "m.room.topic"
@@ -81,3 +87,8 @@ class RejectedReason(object):
AUTH_ERROR = "auth_error"
REPLACED = "replaced"
NOT_ANCESTOR = "not_ancestor"
class RoomCreationPreset(object):
PRIVATE_CHAT = "private_chat"
PUBLIC_CHAT = "public_chat"


@@ -31,13 +31,15 @@ class Codes(object):
BAD_PAGINATION = "M_BAD_PAGINATION"
UNKNOWN = "M_UNKNOWN"
NOT_FOUND = "M_NOT_FOUND"
MISSING_TOKEN = "M_MISSING_TOKEN"
UNKNOWN_TOKEN = "M_UNKNOWN_TOKEN"
LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
MISSING_PARAM = "M_MISSING_PARAM",
TOO_LARGE = "M_TOO_LARGE",
MISSING_PARAM = "M_MISSING_PARAM"
TOO_LARGE = "M_TOO_LARGE"
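# (The trailing commas removed above had made these two constants
# one-element tuples rather than strings.)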
EXCLUSIVE = "M_EXCLUSIVE"
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
class CodeMessageException(RuntimeError):


@@ -18,8 +18,10 @@
CLIENT_PREFIX = "/_matrix/client/api/v1"
CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
FEDERATION_PREFIX = "/_matrix/federation/v1"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"


@@ -16,47 +16,70 @@
import sys
sys.dont_write_bytecode = True
from synapse.python_dependencies import check_requirements
from synapse.storage import prepare_database, UpgradeDatabaseException
if __name__ == '__main__':
check_requirements()
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import (
are_all_users_on_domain, UpgradeDatabaseException,
)
from synapse.server import HomeServer
from synapse.python_dependencies import check_requirements
from twisted.internet import reactor
from twisted.application import service
from twisted.enterprise import adbapi
from twisted.web.resource import Resource
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.web.static import File
from twisted.web.server import Site
from twisted.web.server import Site, GzipEncoderFactory, Request
from synapse.http.server import JsonResource, RootRedirect
from synapse.rest.appservice.v1 import AppServiceRestResource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.http.server_key_resource import LocalKey
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.api.urls import (
CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, APP_SERVICE_PREFIX
SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, STATIC_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext
from synapse.rest.client.v1 import ClientV1RestResource
from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse import events
from daemonize import Daemonize
import twisted.manhole.telnet
import synapse
import contextlib
import logging
import os
import re
import resource
import subprocess
import sqlite3
import syweb
import time
logger = logging.getLogger(__name__)
logger = logging.getLogger("synapse.app.homeserver")
class GzipFile(File):
def getChild(self, path, request):
child = File.getChild(self, path, request)
return EncodingResourceWrapper(child, [GzipEncoderFactory()])
def gz_wrap(r):
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
class SynapseHomeServer(HomeServer):
@@ -73,17 +96,25 @@ class SynapseHomeServer(HomeServer):
def build_resource_for_federation(self):
return JsonResource(self)
def build_resource_for_app_services(self):
return AppServiceRestResource(self)
def build_resource_for_web_client(self):
import syweb
syweb_path = os.path.dirname(syweb.__file__)
webclient_path = os.path.join(syweb_path, "webclient")
# GZip is disabled here due to
# https://twistedmatrix.com/trac/ticket/7678
# (It can stay enabled for the API resources: they call
# write() with the whole body and then finish() straight
# after and so do not trigger the bug.)
# return GzipFile(webclient_path) # TODO configurable?
return File(webclient_path) # TODO configurable?
def build_resource_for_static_content(self):
# This is old and should go away: not going to bother adding gzip
return File("static")
def build_resource_for_content_repo(self):
return ContentRepoResource(
self, self.upload_dir, self.auth, self.content_addr
self, self.config.uploads_path, self.auth, self.content_addr
)
def build_resource_for_media_repository(self):
@@ -92,124 +123,153 @@ class SynapseHomeServer(HomeServer):
def build_resource_for_server_key(self):
return LocalKey(self)
def build_resource_for_server_key_v2(self):
return KeyApiV2Resource(self)
def build_resource_for_metrics(self):
if self.get_config().enable_metrics:
return MetricsResource(self)
else:
return None
def build_db_pool(self):
name = self.db_config["name"]
return adbapi.ConnectionPool(
"sqlite3", self.get_db_name(),
check_same_thread=False,
cp_min=1,
cp_max=1,
cp_openfun=prepare_database, # Prepare the database for each conn
# so that :memory: sqlite works
name,
**self.db_config.get("args", {})
)
def create_resource_tree(self, web_client, redirect_root_to_web_client):
"""Create the resource tree for this Home Server.
def _listener_http(self, config, listener_config):
port = listener_config["port"]
bind_address = listener_config.get("bind_address", "")
tls = listener_config.get("tls", False)
site_tag = listener_config.get("tag", port)
This is unduly complicated because Twisted does not support putting
child resources more than 1 level deep at a time.
if tls and config.no_tls:
return
Args:
web_client (bool): True to enable the web client.
redirect_root_to_web_client (bool): True to redirect '/' to the
location of the web client. This does nothing if web_client is not
True.
"""
# list containing (path_str, Resource) e.g:
# [ ("/aaa/bbb/cc", Resource1), ("/aaa/dummy", Resource2) ]
desired_tree = [
(CLIENT_PREFIX, self.get_resource_for_client()),
(CLIENT_V2_ALPHA_PREFIX, self.get_resource_for_client_v2_alpha()),
(FEDERATION_PREFIX, self.get_resource_for_federation()),
(CONTENT_REPO_PREFIX, self.get_resource_for_content_repo()),
(SERVER_KEY_PREFIX, self.get_resource_for_server_key()),
(MEDIA_PREFIX, self.get_resource_for_media_repository()),
(APP_SERVICE_PREFIX, self.get_resource_for_app_services()),
]
if web_client:
logger.info("Adding the web client.")
desired_tree.append((WEB_CLIENT_PREFIX,
self.get_resource_for_web_client()))
metrics_resource = self.get_resource_for_metrics()
if web_client and redirect_root_to_web_client:
self.root_resource = RootRedirect(WEB_CLIENT_PREFIX)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "client":
if res["compress"]:
client_v1 = gz_wrap(self.get_resource_for_client())
client_v2 = gz_wrap(self.get_resource_for_client_v2_alpha())
else:
self.root_resource = Resource()
client_v1 = self.get_resource_for_client()
client_v2 = self.get_resource_for_client_v2_alpha()
# ideally we'd just use getChild and putChild but getChild doesn't work
# unless you give it a Request object IN ADDITION to the name :/ So
# instead, we'll store a copy of this mapping so we can actually add
# extra resources to existing nodes. See self._resource_id for the key.
resource_mappings = {}
for (full_path, resource) in desired_tree:
logger.info("Attaching %s to path %s", resource, full_path)
last_resource = self.root_resource
for path_seg in full_path.split('/')[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
child_resource = Resource()
last_resource.putChild(path_seg, child_resource)
res_id = self._resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource
last_resource = child_resource
else:
# we have an existing Resource, use that instead.
res_id = self._resource_id(last_resource, path_seg)
last_resource = resource_mappings[res_id]
resources.update({
CLIENT_PREFIX: client_v1,
CLIENT_V2_ALPHA_PREFIX: client_v2,
})
# ===========================
# now attach the actual desired resource
last_path_seg = full_path.split('/')[-1]
if name == "federation":
resources.update({
FEDERATION_PREFIX: self.get_resource_for_federation(),
})
# if there is already a resource here, thieve its children and
# replace it
res_id = self._resource_id(last_resource, last_path_seg)
if res_id in resource_mappings:
# there is a dummy resource at this path already, which needs
# to be replaced with the desired resource.
existing_dummy_resource = resource_mappings[res_id]
for child_name in existing_dummy_resource.listNames():
child_res_id = self._resource_id(existing_dummy_resource,
child_name)
child_resource = resource_mappings[child_res_id]
# steal the children
resource.putChild(child_name, child_resource)
if name in ["static", "client"]:
resources.update({
STATIC_PREFIX: self.get_resource_for_static_content(),
})
# finally, insert the desired resource in the right place
last_resource.putChild(last_path_seg, resource)
res_id = self._resource_id(last_resource, last_path_seg)
resource_mappings[res_id] = resource
if name in ["media", "federation", "client"]:
resources.update({
MEDIA_PREFIX: self.get_resource_for_media_repository(),
CONTENT_REPO_PREFIX: self.get_resource_for_content_repo(),
})
return self.root_resource
if name in ["keys", "federation"]:
resources.update({
SERVER_KEY_PREFIX: self.get_resource_for_server_key(),
SERVER_KEY_V2_PREFIX: self.get_resource_for_server_key_v2(),
})
def _resource_id(self, resource, path_seg):
"""Construct an arbitrary resource ID so you can retrieve the mapping
later.
if name == "webclient":
resources[WEB_CLIENT_PREFIX] = self.get_resource_for_web_client()
If you want to represent resource A putChild resource B with path C,
the mapping should look like _resource_id(A,C) = B.
if name == "metrics" and metrics_resource:
resources[METRICS_PREFIX] = metrics_resource
Args:
resource (Resource): The *parent* Resource
path_seg (str): The name of the child Resource to be attached.
Returns:
str: A unique string which can be a key to the child Resource.
"""
return "%s-%s" % (resource, path_seg)
def start_listening(self, secure_port, unsecure_port):
if secure_port is not None:
root_resource = create_resource_tree(resources)
if tls:
reactor.listenSSL(
secure_port, Site(self.root_resource), self.tls_context_factory
port,
SynapseSite(
"synapse.access.https.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
),
self.tls_context_factory,
interface=bind_address
)
logger.info("Synapse now listening on port %d", secure_port)
if unsecure_port is not None:
else:
reactor.listenTCP(
unsecure_port, Site(self.root_resource)
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
),
interface=bind_address
)
logger.info("Synapse now listening on port %d", unsecure_port)
logger.info("Synapse now listening on port %d", port)
def start_listening(self):
config = self.get_config()
for listener in config.listeners:
if listener["type"] == "http":
self._listener_http(config, listener)
elif listener["type"] == "manhole":
f = twisted.manhole.telnet.ShellFactory()
f.username = "matrix"
f.password = "rabbithole"
f.namespace['hs'] = self
reactor.listenTCP(
listener["port"],
f,
interface=listener.get("bind_address", '127.0.0.1')
)
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
def run_startup_checks(self, db_conn, database_engine):
all_users_native = are_all_users_on_domain(
db_conn.cursor(), database_engine, self.hostname
)
if not all_users_native:
quit_with_error(
"Found users in database not native to %s!\n"
"You cannot changed a synapse server_name after it's been configured"
% (self.hostname,)
)
try:
database_engine.check_database(db_conn.cursor())
except IncorrectDatabaseSetup as e:
quit_with_error(e.message)
def quit_with_error(error_string):
message_lines = error_string.split("\n")
line_length = max([len(l) for l in message_lines]) + 2
sys.stderr.write("*" * line_length + '\n')
for line in message_lines:
if line.strip():
sys.stderr.write(" %s\n" % (line.strip(),))
sys.stderr.write("*" * line_length + '\n')
sys.exit(1)
def get_version_string():
try:
null = open(os.devnull, 'w')
cwd = os.path.dirname(os.path.abspath(__file__))
try:
@@ -265,56 +325,83 @@ def get_version_string():
synapse.__version__, git_version,
)
).encode("ascii")
except Exception as e:
logger.warn("Failed to check for git repository: %s", e)
return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")
def setup():
def change_resource_limit(soft_file_no):
try:
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if not soft_file_no:
soft_file_no = hard
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
logger.info("Set file limit to: %d", soft_file_no)
except (ValueError, resource.error) as e:
logger.warn("Failed to set file limit: %s", e)
def setup(config_options):
"""
Args:
config_options: The options passed to Synapse. Usually
`sys.argv[1:]`.
Returns:
HomeServer
"""
config = HomeServerConfig.load_config(
"Synapse Homeserver",
sys.argv[1:],
config_options,
generate_section="Homeserver"
)
config.setup_logging()
check_requirements()
# check any extra requirements we have now we have a config
check_requirements(config)
version_string = get_version_string()
logger.info("Server hostname: %s", config.server_name)
logger.info("Server version: %s", version_string)
if re.search(":[0-9]+$", config.server_name):
domain_with_port = config.server_name
else:
domain_with_port = "%s:%s" % (config.server_name, config.bind_port)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
tls_context_factory = context_factory.ServerContextFactory(config)
database_engine = create_engine(config.database_config["name"])
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
hs = SynapseHomeServer(
config.server_name,
domain_with_port=domain_with_port,
upload_dir=os.path.abspath("uploads"),
db_name=config.database_path,
db_config=config.database_config,
tls_context_factory=tls_context_factory,
config=config,
content_addr=config.content_addr,
version_string=version_string,
database_engine=database_engine,
)
hs.create_resource_tree(
web_client=config.webclient,
redirect_root_to_web_client=True,
)
db_name = hs.get_db_name()
logger.info("Preparing database: %s...", db_name)
logger.info("Preparing database: %r...", config.database_config)
try:
with sqlite3.connect(db_name) as db_conn:
prepare_database(db_conn)
db_conn = database_engine.module.connect(
**{
k: v for k, v in config.database_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
database_engine.prepare_database(db_conn)
hs.run_startup_checks(db_conn, database_engine)
db_conn.commit()
except UpgradeDatabaseException:
sys.stderr.write(
"\nFailed to upgrade database.\n"
@@ -323,32 +410,260 @@ def setup():
)
sys.exit(1)
logger.info("Database prepared in %s.", db_name)
logger.info("Database prepared in %r.", config.database_config)
if config.manhole:
f = twisted.manhole.telnet.ShellFactory()
f.username = "matrix"
f.password = "rabbithole"
f.namespace['hs'] = hs
reactor.listenTCP(config.manhole, f, interface='127.0.0.1')
bind_port = config.bind_port
if config.no_tls:
bind_port = None
hs.start_listening(bind_port, config.unsecure_port)
hs.start_listening()
hs.get_pusherpool().start()
hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_replication_layer().start_get_pdu_cache()
if config.daemonize:
print config.pid_file
return hs
class SynapseService(service.Service):
"""A twisted Service class that will start synapse. Used to run synapse
via twistd and a .tac.
"""
def __init__(self, config):
self.config = config
def startService(self):
hs = setup(self.config)
change_resource_limit(hs.config.soft_file_limit)
def stopService(self):
return self._port.stopListening()
class SynapseRequest(Request):
def __init__(self, site, *args, **kw):
Request.__init__(self, *args, **kw)
self.site = site
self.authenticated_entity = None
self.start_time = 0
def __repr__(self):
# We overwrite this so that we don't log ``access_token``
return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % (
self.__class__.__name__,
id(self),
self.method,
self.get_redacted_uri(),
self.clientproto,
self.site.site_tag,
)
def get_redacted_uri(self):
return re.sub(
r'(\?.*access_token=)[^&]*(.*)$',
r'\1<redacted>\2',
self.uri
)
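# e.g. "/sync?access_token=SECRET&since=s1" is logged as
# "/sync?access_token=<redacted>&since=s1"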
def get_user_agent(self):
return self.requestHeaders.getRawHeaders("User-Agent", [None])[-1]
def started_processing(self):
self.site.access_logger.info(
"%s - %s - Received request: %s %s",
self.getClientIP(),
self.site.site_tag,
self.method,
self.get_redacted_uri()
)
self.start_time = int(time.time() * 1000)
def finished_processing(self):
self.site.access_logger.info(
"%s - %s - {%s}"
" Processed request: %dms %sB %s \"%s %s %s\" \"%s\"",
self.getClientIP(),
self.site.site_tag,
self.authenticated_entity,
int(time.time() * 1000) - self.start_time,
self.sentLength,
self.code,
self.method,
self.get_redacted_uri(),
self.clientproto,
self.get_user_agent(),
)
@contextlib.contextmanager
def processing(self):
self.started_processing()
yield
self.finished_processing()
class XForwardedForRequest(SynapseRequest):
    """
    Add a layer on top of another request that only uses the value of an
    X-Forwarded-For header as the result of C{getClientIP}.
    """
    def __init__(self, *args, **kw):
        SynapseRequest.__init__(self, *args, **kw)
def getClientIP(self):
"""
@return: The client address (the first address) in the value of the
I{X-Forwarded-For header}. If the header is not present, return
C{b"-"}.
"""
return self.requestHeaders.getRawHeaders(
b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()
class SynapseRequestFactory(object):
def __init__(self, site, x_forwarded_for):
self.site = site
self.x_forwarded_for = x_forwarded_for
def __call__(self, *args, **kwargs):
if self.x_forwarded_for:
return XForwardedForRequest(self.site, *args, **kwargs)
else:
return SynapseRequest(self.site, *args, **kwargs)
class SynapseSite(Site):
"""
Subclass of a twisted http Site that does access logging with python's
standard logging
"""
def __init__(self, logger_name, site_tag, config, resource, *args, **kwargs):
Site.__init__(self, resource, *args, **kwargs)
self.site_tag = site_tag
proxied = config.get("x_forwarded", False)
self.requestFactory = SynapseRequestFactory(self, proxied)
self.access_logger = logging.getLogger(logger_name)
def log(self, request):
pass
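# desired_tree maps path prefixes to Resources, e.g. (illustrative):
#   {CLIENT_PREFIX: client_v1, FEDERATION_PREFIX: federation_resource}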
def create_resource_tree(desired_tree, redirect_root_to_web_client=True):
"""Create the resource tree for this Home Server.
This is unduly complicated because Twisted does not support putting
child resources more than 1 level deep at a time.
Args:
desired_tree (dict): mapping from path prefix to the Resource to
attach there.
redirect_root_to_web_client (bool): True to redirect '/' to the
location of the web client. This does nothing if the web client
is not in the desired tree.
"""
if redirect_root_to_web_client and WEB_CLIENT_PREFIX in desired_tree:
root_resource = RootRedirect(WEB_CLIENT_PREFIX)
else:
root_resource = Resource()
# ideally we'd just use getChild and putChild but getChild doesn't work
# unless you give it a Request object IN ADDITION to the name :/ So
# instead, we'll store a copy of this mapping so we can actually add
# extra resources to existing nodes. See self._resource_id for the key.
resource_mappings = {}
for full_path, res in desired_tree.items():
logger.info("Attaching %s to path %s", res, full_path)
last_resource = root_resource
for path_seg in full_path.split('/')[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
child_resource = Resource()
last_resource.putChild(path_seg, child_resource)
res_id = _resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource
last_resource = child_resource
else:
# we have an existing Resource, use that instead.
res_id = _resource_id(last_resource, path_seg)
last_resource = resource_mappings[res_id]
# ===========================
# now attach the actual desired resource
last_path_seg = full_path.split('/')[-1]
# if there is already a resource here, thieve its children and
# replace it
res_id = _resource_id(last_resource, last_path_seg)
if res_id in resource_mappings:
# there is a dummy resource at this path already, which needs
# to be replaced with the desired resource.
existing_dummy_resource = resource_mappings[res_id]
for child_name in existing_dummy_resource.listNames():
child_res_id = _resource_id(
existing_dummy_resource, child_name
)
child_resource = resource_mappings[child_res_id]
# steal the children
res.putChild(child_name, child_resource)
# finally, insert the desired resource in the right place
last_resource.putChild(last_path_seg, res)
res_id = _resource_id(last_resource, last_path_seg)
resource_mappings[res_id] = res
return root_resource
def _resource_id(resource, path_seg):
"""Construct an arbitrary resource ID so you can retrieve the mapping
later.
If you want to represent resource A putChild resource B with path C,
the mapping should look like _resource_id(A,C) = B.
Args:
resource (Resource): The *parent* Resource
path_seg (str): The name of the child Resource to be attached.
Returns:
str: A unique string which can be a key to the child Resource.
"""
return "%s-%s" % (resource, path_seg)
def run(hs):
PROFILE_SYNAPSE = False
if PROFILE_SYNAPSE:
def profile(func):
from cProfile import Profile
from threading import current_thread
def profiled(*args, **kargs):
profile = Profile()
profile.enable()
func(*args, **kargs)
profile.disable()
ident = current_thread().ident
profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
hs.hostname, func.__name__, ident
))
return profiled
from twisted.python.threadpool import ThreadPool
ThreadPool._worker = profile(ThreadPool._worker)
reactor.run = profile(reactor.run)
def in_thread():
with LoggingContext("run"):
change_resource_limit(hs.config.soft_file_limit)
reactor.run()
if hs.config.daemonize:
if hs.config.print_pidfile:
print hs.config.pid_file
daemon = Daemonize(
app="synapse-homeserver",
pid=config.pid_file,
action=run,
pid=hs.config.pid_file,
action=lambda: in_thread(),
auto_close_fds=False,
verbose=True,
logger=logger,
@@ -356,18 +671,15 @@ def setup():
daemon.start()
else:
reactor.run()
def run():
with LoggingContext("run"):
reactor.run()
in_thread()
def main():
with LoggingContext("main"):
# check base requirements
check_requirements()
setup()
hs = setup(sys.argv[1:])
run(hs)
if __name__ == '__main__':


@@ -18,18 +18,16 @@ import sys
import os
import subprocess
import signal
import yaml
SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
CONFIGFILE = "homeserver.yaml"
PIDFILE = "homeserver.pid"
GREEN = "\x1b[1;32m"
NORMAL = "\x1b[m"
def start():
if not os.path.exists(CONFIGFILE):
if not os.path.exists(CONFIGFILE):
sys.stderr.write(
"No config file found\n"
"To generate a config file, run '%s -c %s --generate-config"
@@ -38,9 +36,15 @@ def start():
)
)
sys.exit(1)
CONFIG = yaml.load(open(CONFIGFILE))
PIDFILE = CONFIG["pid_file"]
def start():
print "Starting ...",
args = SYNAPSE
args.extend(["--daemonize", "-c", CONFIGFILE, "--pid-file", PIDFILE])
args.extend(["--daemonize", "-c", CONFIGFILE])
subprocess.check_call(args)
print GREEN + "started" + NORMAL


@@ -20,6 +20,50 @@ import re
logger = logging.getLogger(__name__)
class ApplicationServiceState(object):
DOWN = "down"
UP = "up"
class AppServiceTransaction(object):
"""Represents an application service transaction."""
def __init__(self, service, id, events):
self.service = service
self.id = id
self.events = events
def send(self, as_api):
"""Sends this transaction using the provided AS API interface.
Args:
as_api(ApplicationServiceApi): The API to use to send.
Returns:
A Deferred which resolves to True if the transaction was sent.
"""
return as_api.push_bulk(
service=self.service,
events=self.events,
txn_id=self.id
)
def complete(self, store):
"""Completes this transaction as successful.
Marks this transaction ID on the application service and removes the
transaction contents from the database.
Args:
store: The database store to operate on.
Returns:
A Deferred which resolves to True if the transaction was completed.
"""
return store.complete_appservice_txn(
service=self.service,
txn_id=self.id
)
class ApplicationService(object):
"""Defines an application service. This definition is mostly what is
provided to the /register AS API.
@@ -35,33 +79,45 @@ class ApplicationService(object):
NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]
def __init__(self, token, url=None, namespaces=None, hs_token=None,
sender=None, txn_id=None):
sender=None, id=None):
self.token = token
self.url = url
self.hs_token = hs_token
self.sender = sender
self.namespaces = self._check_namespaces(namespaces)
self.txn_id = txn_id
self.id = id
def _check_namespaces(self, namespaces):
# Sanity check that it is of the form:
# {
# users: ["regex",...],
# aliases: ["regex",...],
# rooms: ["regex",...],
# users: [ {regex: "[A-z]+.*", exclusive: true}, ...],
# aliases: [ {regex: "[A-z]+.*", exclusive: true}, ...],
# rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...],
# }
if not namespaces:
return None
namespaces = {}
for ns in ApplicationService.NS_LIST:
if ns not in namespaces:
namespaces[ns] = []
continue
if type(namespaces[ns]) != list:
raise ValueError("Bad namespace value for '%s'", ns)
for regex in namespaces[ns]:
if not isinstance(regex, basestring):
raise ValueError("Expected string regex for ns '%s'", ns)
raise ValueError("Bad namespace value for '%s'" % ns)
for regex_obj in namespaces[ns]:
if not isinstance(regex_obj, dict):
raise ValueError("Expected dict regex for ns '%s'" % ns)
if not isinstance(regex_obj.get("exclusive"), bool):
raise ValueError(
"Expected bool for 'exclusive' in ns '%s'" % ns
)
if not isinstance(regex_obj.get("regex"), basestring):
raise ValueError(
"Expected string for 'regex' in ns '%s'" % ns
)
return namespaces
def _matches_regex(self, test_string, namespace_key):
def _matches_regex(self, test_string, namespace_key, return_obj=False):
if not isinstance(test_string, basestring):
logger.error(
"Expected a string to test regex against, but got %s",
@@ -69,11 +125,19 @@ class ApplicationService(object):
)
return False
for regex in self.namespaces[namespace_key]:
if re.match(regex, test_string):
for regex_obj in self.namespaces[namespace_key]:
if re.match(regex_obj["regex"], test_string):
if return_obj:
return regex_obj
return True
return False
def _is_exclusive(self, ns_key, test_string):
regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
if regex_obj:
return regex_obj["exclusive"]
return False
def _matches_user(self, event, member_list):
if (hasattr(event, "sender") and
self.is_interested_in_user(event.sender)):
@@ -84,8 +148,8 @@ class ApplicationService(object):
and self.is_interested_in_user(event.state_key)):
return True
# check joined member events
for member in member_list:
if self.is_interested_in_user(member.state_key):
for user_id in member_list:
if self.is_interested_in_user(user_id):
return True
return False
@@ -109,7 +173,7 @@ class ApplicationService(object):
restrict_to(str): The namespace to restrict regex tests to.
aliases_for_event(list): A list of all the known room aliases for
this event.
member_list(list): A list of all joined room members in this room.
member_list(list): A list of all joined user_ids in this room.
Returns:
bool: True if this service would like to know about this event.
"""
@@ -135,7 +199,10 @@ class ApplicationService(object):
return self._matches_user(event, member_list)
def is_interested_in_user(self, user_id):
return self._matches_regex(user_id, ApplicationService.NS_USERS)
return (
self._matches_regex(user_id, ApplicationService.NS_USERS)
or user_id == self.sender
)
def is_interested_in_alias(self, alias):
return self._matches_regex(alias, ApplicationService.NS_ALIASES)
@@ -143,5 +210,17 @@ class ApplicationService(object):
def is_interested_in_room(self, room_id):
return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
def is_exclusive_user(self, user_id):
return (
self._is_exclusive(ApplicationService.NS_USERS, user_id)
or user_id == self.sender
)
def is_exclusive_alias(self, alias):
return self._is_exclusive(ApplicationService.NS_ALIASES, alias)
def is_exclusive_room(self, room_id):
return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
def __str__(self):
return "ApplicationService: %s" % (self.__dict__,)

View File

@@ -72,14 +72,19 @@ class ApplicationServiceApi(SimpleHttpClient):
defer.returnValue(False)
@defer.inlineCallbacks
def push_bulk(self, service, events):
def push_bulk(self, service, events, txn_id=None):
events = self._serialize(events)
if txn_id is None:
logger.warning("push_bulk: Missing txn ID sending events to %s",
service.url)
txn_id = str(0)
txn_id = str(txn_id)
uri = service.url + ("/transactions/%s" %
urllib.quote(str(0))) # TODO txn_ids
response = None
urllib.quote(txn_id))
try:
response = yield self.put_json(
yield self.put_json(
uri=uri,
json_body={
"events": events
@@ -87,9 +92,8 @@ class ApplicationServiceApi(SimpleHttpClient):
args={
"access_token": service.hs_token
})
if response: # just an empty json object
# TODO: Mark txn as sent successfully
defer.returnValue(True)
return
except CodeMessageException as e:
logger.warning("push_bulk to %s received %s", uri, e.code)
except Exception as ex:
@@ -97,8 +101,8 @@ class ApplicationServiceApi(SimpleHttpClient):
defer.returnValue(False)
@defer.inlineCallbacks
def push(self, service, event):
response = yield self.push_bulk(service, [event])
def push(self, service, event, txn_id=None):
response = yield self.push_bulk(service, [event], txn_id)
defer.returnValue(response)
def _serialize(self, events):
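A hedged sketch of the URI the patched push_bulk now builds, mirroring the uri construction shown above (the service URL and transaction ID are hypothetical):

import urllib

service_url = "https://bridge.example.com"  # hypothetical AS base URL
txn_id = 42
uri = service_url + ("/transactions/%s" % urllib.quote(str(txn_id)))
assert uri == "https://bridge.example.com/transactions/42"

The point of threading txn_id through is that retries reuse the same transaction ID, so the AS can deduplicate redeliveries instead of seeing every push arrive as transaction "0".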

View File

@@ -0,0 +1,254 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module controls the reliability for application service transactions.
The nominal flow through this module looks like:
__________
1---ASa[e]-->| Service |--> Queue ASa[f]
2----ASb[e]->| Queuer |
3--ASa[f]--->|__________|-----------+ ASa[e], ASb[e]
V
-````````- +------------+
|````````|<--StoreTxn-|Transaction |
|Database| | Controller |---> SEND TO AS
`--------` +------------+
What happens on SEND TO AS depends on the state of the Application Service:
- If the AS is marked as DOWN, do nothing.
- If the AS is marked as UP, send the transaction.
* SUCCESS : Increment where the AS is up to txn-wise and nuke the txn
contents from the db.
* FAILURE : Marked AS as DOWN and start Recoverer.
Recoverer attempts to recover ASes who have died. The flow for this looks like:
,--------------------- backoff++ --------------.
V |
START ---> Wait exp ------> Get oldest txn ID from ----> FAILURE
backoff DB and try to send it
^ |___________
Mark AS as | V
UP & quit +---------- YES SUCCESS
| | |
NO <--- Have more txns? <------ Mark txn success & nuke <-+
from db; incr AS pos.
Reset backoff.
This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
from synapse.appservice import ApplicationServiceState
from twisted.internet import defer
import logging
logger = logging.getLogger(__name__)
class AppServiceScheduler(object):
""" Public facing API for this module. Does the required DI to tie the
components together. This also serves as the "event_pool", which in this
case is a simple array.
"""
def __init__(self, clock, store, as_api):
self.clock = clock
self.store = store
self.as_api = as_api
def create_recoverer(service, callback):
return _Recoverer(clock, store, as_api, service, callback)
self.txn_ctrl = _TransactionController(
clock, store, as_api, create_recoverer
)
self.queuer = _ServiceQueuer(self.txn_ctrl)
@defer.inlineCallbacks
def start(self):
logger.info("Starting appservice scheduler")
# check for any DOWN ASes and start recoverers for them.
recoverers = yield _Recoverer.start(
self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered
)
self.txn_ctrl.add_recoverers(recoverers)
def submit_event_for_as(self, service, event):
self.queuer.enqueue(service, event)
class _ServiceQueuer(object):
"""Queues events for the same application service together, sending
transactions as soon as possible. Once a transaction is sent successfully,
this schedules any other events in the queue to run.
"""
def __init__(self, txn_ctrl):
self.queued_events = {} # dict of {service_id: [events]}
self.pending_requests = {} # dict of {service_id: Deferred}
self.txn_ctrl = txn_ctrl
def enqueue(self, service, event):
# if this service isn't being sent something
if not self.pending_requests.get(service.id):
self._send_request(service, [event])
else:
# add to queue for this service
if service.id not in self.queued_events:
self.queued_events[service.id] = []
self.queued_events[service.id].append(event)
def _send_request(self, service, events):
# send request and add callbacks
d = self.txn_ctrl.send(service, events)
d.addBoth(self._on_request_finish)
d.addErrback(self._on_request_fail)
self.pending_requests[service.id] = d
def _on_request_finish(self, service):
self.pending_requests[service.id] = None
# if there are queued events, then send them.
if (service.id in self.queued_events
and len(self.queued_events[service.id]) > 0):
self._send_request(service, self.queued_events[service.id])
self.queued_events[service.id] = []
def _on_request_fail(self, err):
logger.error("AS request failed: %s", err)
class _TransactionController(object):
def __init__(self, clock, store, as_api, recoverer_fn):
self.clock = clock
self.store = store
self.as_api = as_api
self.recoverer_fn = recoverer_fn
# keep track of how many recoverers there are
self.recoverers = []
@defer.inlineCallbacks
def send(self, service, events):
try:
txn = yield self.store.create_appservice_txn(
service=service,
events=events
)
service_is_up = yield self._is_service_up(service)
if service_is_up:
sent = yield txn.send(self.as_api)
if sent:
txn.complete(self.store)
else:
self._start_recoverer(service)
except Exception as e:
logger.exception(e)
self._start_recoverer(service)
# request has finished
defer.returnValue(service)
@defer.inlineCallbacks
def on_recovered(self, recoverer):
self.recoverers.remove(recoverer)
logger.info("Successfully recovered application service AS ID %s",
recoverer.service.id)
logger.info("Remaining active recoverers: %s", len(self.recoverers))
yield self.store.set_appservice_state(
recoverer.service,
ApplicationServiceState.UP
)
def add_recoverers(self, recoverers):
for r in recoverers:
self.recoverers.append(r)
if len(recoverers) > 0:
logger.info("New active recoverers: %s", len(self.recoverers))
@defer.inlineCallbacks
def _start_recoverer(self, service):
yield self.store.set_appservice_state(
service,
ApplicationServiceState.DOWN
)
logger.info(
"Application service falling behind. Starting recoverer. AS ID %s",
service.id
)
recoverer = self.recoverer_fn(service, self.on_recovered)
self.add_recoverers([recoverer])
recoverer.recover()
@defer.inlineCallbacks
def _is_service_up(self, service):
state = yield self.store.get_appservice_state(service)
defer.returnValue(state == ApplicationServiceState.UP or state is None)
class _Recoverer(object):
@staticmethod
@defer.inlineCallbacks
def start(clock, store, as_api, callback):
services = yield store.get_appservices_by_state(
ApplicationServiceState.DOWN
)
recoverers = [
_Recoverer(clock, store, as_api, s, callback) for s in services
]
for r in recoverers:
logger.info("Starting recoverer for AS ID %s which was marked as "
"DOWN", r.service.id)
r.recover()
defer.returnValue(recoverers)
def __init__(self, clock, store, as_api, service, callback):
self.clock = clock
self.store = store
self.as_api = as_api
self.service = service
self.callback = callback
self.backoff_counter = 1
def recover(self):
self.clock.call_later((2 ** self.backoff_counter), self.retry)
def _backoff(self):
# cap the backoff to be around 18h => (2^16) = 65536 secs
if self.backoff_counter < 16:
self.backoff_counter += 1
self.recover()
@defer.inlineCallbacks
def retry(self):
try:
txn = yield self.store.get_oldest_unsent_txn(self.service)
if txn:
logger.info("Retrying transaction %s for AS ID %s",
txn.id, txn.service.id)
sent = yield txn.send(self.as_api)
if sent:
yield txn.complete(self.store)
# reset the backoff counter and retry immediately
self.backoff_counter = 1
yield self.retry()
else:
self._backoff()
else:
self._set_service_recovered()
except Exception as e:
logger.exception(e)
self._backoff()
def _set_service_recovered(self):
self.callback(self)
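To make the backoff above concrete, a quick worked sketch in plain arithmetic (no Synapse imports needed): recover() waits 2**backoff_counter seconds, and _backoff() stops incrementing the counter at 16, so retry delays double until they hit the cap:

# attempt 1: 2**1 = 2s; attempt 2: 2**2 = 4s; ...
# attempt 16 onwards: 2**16 = 65536s, i.e. roughly 18.2 hours
delays = [2 ** min(n, 16) for n in range(1, 20)]
assert delays[0] == 2
assert delays[-1] == 65536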

View File

@@ -14,9 +14,10 @@
# limitations under the License.
import argparse
import sys
import os
import yaml
import sys
from textwrap import dedent
class ConfigError(Exception):
@@ -24,18 +25,35 @@ class ConfigError(Exception):
class Config(object):
def __init__(self, args):
pass
@staticmethod
def parse_size(string):
def parse_size(value):
if isinstance(value, int) or isinstance(value, long):
return value
sizes = {"K": 1024, "M": 1024 * 1024}
size = 1
suffix = string[-1]
suffix = value[-1]
if suffix in sizes:
string = string[:-1]
value = value[:-1]
size = sizes[suffix]
return int(string) * size
return int(value) * size
@staticmethod
def parse_duration(value):
if isinstance(value, int) or isinstance(value, long):
return value
second = 1000
hour = 60 * 60 * second
day = 24 * hour
week = 7 * day
year = 365 * day
sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
size = 1
suffix = value[-1]
if suffix in sizes:
value = value[:-1]
size = sizes[suffix]
return int(value) * size
@staticmethod
def abspath(file_path):
@@ -86,83 +104,130 @@ class Config(object):
with open(file_path) as file_stream:
return yaml.load(file_stream)
@classmethod
def add_arguments(cls, parser):
pass
def invoke_all(self, name, *args, **kargs):
results = []
for cls in type(self).mro():
if name in cls.__dict__:
results.append(getattr(cls, name)(self, *args, **kargs))
return results
@classmethod
def generate_config(cls, args, config_dir_path):
pass
def generate_config(self, config_dir_path, server_name):
default_config = "# vim:ft=yaml\n"
default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
"default_config", config_dir_path, server_name
))
config = yaml.load(default_config)
return default_config, config
@classmethod
def load_config(cls, description, argv, generate_section=None):
obj = cls()
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument(
"-c", "--config-path",
action="append",
metavar="CONFIG_FILE",
help="Specify config file"
)
config_parser.add_argument(
"--generate-config",
action="store_true",
help="Generate config file"
help="Generate a config file for the server name"
)
config_parser.add_argument(
"--generate-keys",
action="store_true",
help="Generate any missing key files then exit"
)
config_parser.add_argument(
"-H", "--server-name",
help="The server name to generate a config file for"
)
config_args, remaining_args = config_parser.parse_known_args(argv)
generate_keys = config_args.generate_keys
if config_args.generate_config:
if not config_args.config_path:
config_parser.error(
"Must specify where to generate the config file"
"Must supply a config file.\nA config file can be automatically"
" generated using \"--generate-config -H SERVER_NAME"
" -c CONFIG-FILE\""
)
config_dir_path = os.path.dirname(config_args.config_path)
if os.path.exists(config_args.config_path):
defaults = cls.read_config_file(config_args.config_path)
(config_path,) = config_args.config_path
if not os.path.exists(config_path):
config_dir_path = os.path.dirname(config_path)
config_dir_path = os.path.abspath(config_dir_path)
server_name = config_args.server_name
if not server_name:
print "Must specify a server_name to a generate config for."
sys.exit(1)
if not os.path.exists(config_dir_path):
os.makedirs(config_dir_path)
with open(config_path, "wb") as config_file:
config_bytes, config = obj.generate_config(
config_dir_path, server_name
)
obj.invoke_all("generate_files", config)
config_file.write(config_bytes)
print (
"A config file has been generated in %r for server name"
" %r with corresponding SSL keys and self-signed"
" certificates. Please review this file and customise it"
" to your needs."
) % (config_path, server_name)
print (
"If this server name is incorrect, you will need to"
" regenerate the SSL certificates"
)
sys.exit(0)
else:
defaults = {}
else:
if config_args.config_path:
defaults = cls.read_config_file(config_args.config_path)
else:
defaults = {}
print (
"Config file %r already exists. Generating any missing key"
" files."
) % (config_path,)
generate_keys = True
parser = argparse.ArgumentParser(
parents=[config_parser],
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
cls.add_arguments(parser)
parser.set_defaults(**defaults)
obj.invoke_all("add_arguments", parser)
args = parser.parse_args(remaining_args)
if config_args.generate_config:
config_dir_path = os.path.dirname(config_args.config_path)
if not config_args.config_path:
config_parser.error(
"Must supply a config file.\nA config file can be automatically"
" generated using \"--generate-config -H SERVER_NAME"
" -c CONFIG-FILE\""
)
config_dir_path = os.path.dirname(config_args.config_path[-1])
config_dir_path = os.path.abspath(config_dir_path)
if not os.path.exists(config_dir_path):
os.makedirs(config_dir_path)
cls.generate_config(args, config_dir_path)
config = {}
for key, value in vars(args).items():
if (key not in set(["config_path", "generate_config"])
and value is not None):
config[key] = value
with open(config_args.config_path, "w") as config_file:
# TODO(paul) it would be lovely if we wrote out vim- and emacs-
# style mode markers into the file, to hint to people that
# this is a YAML file.
yaml.dump(config, config_file, default_flow_style=False)
print (
"A config file has been generated in %s for server name"
" '%s' with corresponding SSL keys and self-signed"
" certificates. Please review this file and customise it to"
" your needs."
) % (
config_args.config_path, config['server_name']
)
print (
"If this server name is incorrect, you will need to regenerate"
" the SSL certificates"
)
specified_config = {}
for config_path in config_args.config_path:
yaml_config = cls.read_config_file(config_path)
specified_config.update(yaml_config)
server_name = specified_config["server_name"]
_, config = obj.generate_config(config_dir_path, server_name)
config.pop("log_config")
config.update(specified_config)
if generate_keys:
obj.invoke_all("generate_files", config)
sys.exit(0)
return cls(args)
obj.invoke_all("read_config", config)
obj.invoke_all("read_arguments", args)
return obj
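A quick illustration of the two static parsers above; note that parse_duration works in milliseconds, since its base unit is second = 1000:

assert Config.parse_size("10K") == 10 * 1024
assert Config.parse_size(2048) == 2048  # ints pass straight through
assert Config.parse_duration("1h") == 60 * 60 * 1000
assert Config.parse_duration("1d") == 24 * 60 * 60 * 1000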

View File

@@ -0,0 +1,27 @@
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class AppServiceConfig(Config):
def read_config(self, config):
self.app_service_config_files = config.get("app_service_config_files", [])
def default_config(cls, config_dir_path, server_name):
return """\
# A list of application service config files to use
app_service_config_files: []
"""

View File

@@ -17,35 +17,31 @@ from ._base import Config
class CaptchaConfig(Config):
def __init__(self, args):
super(CaptchaConfig, self).__init__(args)
self.recaptcha_private_key = args.recaptcha_private_key
self.enable_registration_captcha = args.enable_registration_captcha
self.captcha_ip_origin_is_x_forwarded = (
args.captcha_ip_origin_is_x_forwarded
)
self.captcha_bypass_secret = args.captcha_bypass_secret
def read_config(self, config):
self.recaptcha_private_key = config["recaptcha_private_key"]
self.recaptcha_public_key = config["recaptcha_public_key"]
self.enable_registration_captcha = config["enable_registration_captcha"]
self.captcha_bypass_secret = config.get("captcha_bypass_secret")
self.recaptcha_siteverify_api = config["recaptcha_siteverify_api"]
@classmethod
def add_arguments(cls, parser):
super(CaptchaConfig, cls).add_arguments(parser)
group = parser.add_argument_group("recaptcha")
group.add_argument(
"--recaptcha-private-key", type=str, default="YOUR_PRIVATE_KEY",
help="The matching private key for the web client's public key."
)
group.add_argument(
"--enable-registration-captcha", type=bool, default=False,
help="Enables ReCaptcha checks when registering, preventing signup"
+ " unless a captcha is answered. Requires a valid ReCaptcha "
+ "public/private key."
)
group.add_argument(
"--captcha_ip_origin_is_x_forwarded", type=bool, default=False,
help="When checking captchas, use the X-Forwarded-For (XFF) header"
+ " as the client IP and not the actual client IP."
)
group.add_argument(
"--captcha_bypass_secret", type=str,
help="A secret key used to bypass the captcha test entirely."
)
def default_config(self, config_dir_path, server_name):
return """\
## Captcha ##
# This Home Server's ReCAPTCHA private key.
recaptcha_private_key: "YOUR_PRIVATE_KEY"
# This Home Server's ReCAPTCHA public key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"
# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha
# public/private key.
enable_registration_captcha: False
# A secret key used to bypass the captcha test entirely.
#captcha_bypass_secret: "YOUR_SECRET_HERE"
# The API endpoint to use for verifying m.login.recaptcha responses.
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
"""

View File

@@ -14,32 +14,66 @@
# limitations under the License.
from ._base import Config
import os
class DatabaseConfig(Config):
def __init__(self, args):
super(DatabaseConfig, self).__init__(args)
if args.database_path == ":memory:":
self.database_path = ":memory:"
else:
self.database_path = self.abspath(args.database_path)
self.event_cache_size = self.parse_size(args.event_cache_size)
@classmethod
def add_arguments(cls, parser):
super(DatabaseConfig, cls).add_arguments(parser)
def read_config(self, config):
self.event_cache_size = self.parse_size(
config.get("event_cache_size", "10K")
)
self.database_config = config.get("database")
if self.database_config is None:
self.database_config = {
"name": "sqlite3",
"args": {},
}
name = self.database_config.get("name", None)
if name == "psycopg2":
pass
elif name == "sqlite3":
self.database_config.setdefault("args", {}).update({
"cp_min": 1,
"cp_max": 1,
"check_same_thread": False,
})
else:
raise RuntimeError("Unsupported database type '%s'" % (name,))
self.set_databasepath(config.get("database_path"))
def default_config(self, config, config_dir_path):
database_path = self.abspath("homeserver.db")
return """\
# Database configuration
database:
# The database engine name
name: "sqlite3"
# Arguments to pass to the engine
args:
# Path to the database
database: "%(database_path)s"
# Number of events to cache in memory.
event_cache_size: "10K"
""" % locals()
def read_arguments(self, args):
self.set_databasepath(args.database_path)
def set_databasepath(self, database_path):
if database_path != ":memory:":
database_path = self.abspath(database_path)
if self.database_config.get("name", None) == "sqlite3":
if database_path is not None:
self.database_config["args"]["database"] = database_path
def add_arguments(self, parser):
db_group = parser.add_argument_group("database")
db_group.add_argument(
"-d", "--database-path", default="homeserver.db",
help="The database name."
"-d", "--database-path", metavar="SQLITE_DATABASE_PATH",
help="The path to a sqlite database to use."
)
db_group.add_argument(
"--event-cache-size", default="100K",
help="Number of events to cache in memory."
)
@classmethod
def generate_config(cls, args, config_dir_path):
super(DatabaseConfig, cls).generate_config(args, config_dir_path)
args.database_path = os.path.abspath(args.database_path)
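With the new database_config block, postgres can be selected from YAML alone. A hypothetical parsed config as read_config would receive it; the "args" keys are standard psycopg2 connection parameters, not values taken from this changeset:

config = {
    "database": {
        "name": "psycopg2",
        "args": {
            "user": "synapse",
            "password": "secret",
            "database": "synapse",
            "host": "127.0.0.1",
        },
    },
    "event_cache_size": "10K",
}
# For sqlite3 the same block instead carries a database path in "args",
# which read_config tops up with cp_min/cp_max/check_same_thread defaults.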

View File

@@ -1,42 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class EmailConfig(Config):
def __init__(self, args):
super(EmailConfig, self).__init__(args)
self.email_from_address = args.email_from_address
self.email_smtp_server = args.email_smtp_server
@classmethod
def add_arguments(cls, parser):
super(EmailConfig, cls).add_arguments(parser)
email_group = parser.add_argument_group("email")
email_group.add_argument(
"--email-from-address",
default="FROM@EXAMPLE.COM",
help="The address to send emails from (e.g. for password resets)."
)
email_group.add_argument(
"--email-smtp-server",
default="",
help=(
"The SMTP server to send emails from (e.g. for password"
" resets)."
)
)

View File

@@ -20,16 +20,23 @@ from .database import DatabaseConfig
from .ratelimiting import RatelimitConfig
from .repository import ContentRepositoryConfig
from .captcha import CaptchaConfig
from .email import EmailConfig
from .voip import VoipConfig
from .registration import RegistrationConfig
from .metrics import MetricsConfig
from .appservice import AppServiceConfig
from .key import KeyConfig
from .saml2 import SAML2Config
class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
EmailConfig, VoipConfig):
VoipConfig, RegistrationConfig, MetricsConfig,
AppServiceConfig, KeyConfig, SAML2Config, ):
pass
if __name__ == '__main__':
import sys
HomeServerConfig.load_config("Generate config", sys.argv[1:], "HomeServer")
sys.stdout.write(
HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0]
)

synapse/config/key.py (new file)
View File

@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ._base import Config, ConfigError
import syutil.crypto.signing_key
from syutil.crypto.signing_key import (
is_signing_algorithm_supported, decode_verify_key_bytes
)
from syutil.base64util import decode_base64
from synapse.util.stringutils import random_string
class KeyConfig(Config):
def read_config(self, config):
self.signing_key = self.read_signing_key(config["signing_key_path"])
self.old_signing_keys = self.read_old_signing_keys(
config["old_signing_keys"]
)
self.key_refresh_interval = self.parse_duration(
config["key_refresh_interval"]
)
self.perspectives = self.read_perspectives(
config["perspectives"]
)
def default_config(self, config_dir_path, server_name):
base_key_name = os.path.join(config_dir_path, server_name)
return """\
## Signing Keys ##
# Path to the signing key to sign messages with
signing_key_path: "%(base_key_name)s.signing.key"
# The keys that the server used to sign messages with but won't use
# to sign new messages. E.g. it has lost its private key
old_signing_keys: {}
# "ed25519:auto":
# # Base64 encoded public key
# key: "The public part of your old signing key."
# # Millisecond POSIX timestamp when the key expired.
# expired_ts: 123456789123
# How long a key response published by this server is valid for.
# Used to set the valid_until_ts in /key/v2 APIs.
# Determines how quickly servers will query to check which keys
# are still valid.
key_refresh_interval: "1d" # 1 Day.
# The trusted servers to download signing keys from.
perspectives:
servers:
"matrix.org":
verify_keys:
"ed25519:auto":
key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
""" % locals()
def read_perspectives(self, perspectives_config):
servers = {}
for server_name, server_config in perspectives_config["servers"].items():
for key_id, key_data in server_config["verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
servers.setdefault(server_name, {})[key_id] = verify_key
return servers
def read_signing_key(self, signing_key_path):
signing_keys = self.read_file(signing_key_path, "signing_key")
try:
return syutil.crypto.signing_key.read_signing_keys(
signing_keys.splitlines(True)
)
except Exception:
raise ConfigError(
"Error reading signing_key."
" Try running again with --generate-config"
)
def read_old_signing_keys(self, old_signing_keys):
keys = {}
for key_id, key_data in old_signing_keys.items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
verify_key.expired_ts = key_data["expired_ts"]
keys[key_id] = verify_key
else:
raise ConfigError(
"Unsupported signing algorithm for old key: %r" % (key_id,)
)
return keys
def generate_files(self, config):
signing_key_path = config["signing_key_path"]
if not os.path.exists(signing_key_path):
with open(signing_key_path, "w") as signing_key_file:
key_id = "a_" + random_string(4)
syutil.crypto.signing_key.write_signing_keys(
signing_key_file,
(syutil.crypto.signing_key.generate_signing_key(key_id),),
)
else:
signing_keys = self.read_file(signing_key_path, "signing_key")
if len(signing_keys.split("\n")[0].split()) == 1:
# handle keys in the old format.
key_id = "a_" + random_string(4)
key = syutil.crypto.signing_key.decode_signing_key_base64(
syutil.crypto.signing_key.NACL_ED25519,
key_id,
signing_keys.split("\n")[0]
)
with open(signing_key_path, "w") as signing_key_file:
syutil.crypto.signing_key.write_signing_keys(
signing_key_file,
(key,),
)

View File

@@ -19,25 +19,88 @@ from twisted.python.log import PythonLoggingObserver
import logging
import logging.config
import yaml
from string import Template
import os
DEFAULT_LOG_CONFIG = Template("""
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s\
- %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: ${log_file}
maxBytes: 104857600
backupCount: 10
filters: [context]
level: INFO
console:
class: logging.StreamHandler
formatter: precise
loggers:
synapse:
level: INFO
synapse.storage.SQL:
level: INFO
root:
level: INFO
handlers: [file, console]
""")
class LoggingConfig(Config):
def __init__(self, args):
super(LoggingConfig, self).__init__(args)
self.verbosity = int(args.verbose) if args.verbose else None
self.log_config = self.abspath(args.log_config)
self.log_file = self.abspath(args.log_file)
@classmethod
def read_config(self, config):
self.verbosity = config.get("verbose", 0)
self.log_config = self.abspath(config.get("log_config"))
self.log_file = self.abspath(config.get("log_file"))
def default_config(self, config_dir_path, server_name):
log_file = self.abspath("homeserver.log")
log_config = self.abspath(
os.path.join(config_dir_path, server_name + ".log.config")
)
return """
# Logging verbosity level.
verbose: 0
# File to write logging to
log_file: "%(log_file)s"
# A yaml python logging config file
log_config: "%(log_config)s"
""" % locals()
def read_arguments(self, args):
if args.verbose is not None:
self.verbosity = args.verbose
if args.log_config is not None:
self.log_config = args.log_config
if args.log_file is not None:
self.log_file = args.log_file
def add_arguments(cls, parser):
super(LoggingConfig, cls).add_arguments(parser)
logging_group = parser.add_argument_group("logging")
logging_group.add_argument(
'-v', '--verbose', dest="verbose", action='count',
help="The verbosity level."
)
logging_group.add_argument(
'-f', '--log-file', dest="log_file", default="homeserver.log",
'-f', '--log-file', dest="log_file",
help="File to log to."
)
logging_group.add_argument(
@@ -45,6 +108,14 @@ class LoggingConfig(Config):
help="Python logging config file"
)
def generate_files(self, config):
log_config = config.get("log_config")
if log_config and not os.path.exists(log_config):
with open(log_config, "wb") as log_config_file:
log_config_file.write(
DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
)
def setup_logging(self):
log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
@@ -78,7 +149,6 @@ class LoggingConfig(Config):
handler.addFilter(LoggingContextFilter(request=""))
logger.addHandler(handler)
logger.info("Test")
else:
with open(self.log_config, 'r') as f:
logging.config.dictConfig(yaml.load(f))
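generate_files fills the ${log_file} placeholder with string.Template, so the generated log config ends up as plain YAML with the path baked in. A minimal sketch of that substitution step (the path is hypothetical):

from string import Template

template = Template("filename: ${log_file}")
rendered = template.substitute(log_file="/var/log/matrix/homeserver.log")
assert rendered == "filename: /var/log/matrix/homeserver.log"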

synapse/config/metrics.py (new file)
View File

@@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class MetricsConfig(Config):
def read_config(self, config):
self.enable_metrics = config["enable_metrics"]
self.metrics_port = config.get("metrics_port")
self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
def default_config(self, config_dir_path, server_name):
return """\
## Metrics ##
# Enable collection and rendering of performance metrics
enable_metrics: False
"""

View File

@@ -17,20 +17,42 @@ from ._base import Config
class RatelimitConfig(Config):
def __init__(self, args):
super(RatelimitConfig, self).__init__(args)
self.rc_messages_per_second = args.rc_messages_per_second
self.rc_message_burst_count = args.rc_message_burst_count
def read_config(self, config):
self.rc_messages_per_second = config["rc_messages_per_second"]
self.rc_message_burst_count = config["rc_message_burst_count"]
@classmethod
def add_arguments(cls, parser):
super(RatelimitConfig, cls).add_arguments(parser)
rc_group = parser.add_argument_group("ratelimiting")
rc_group.add_argument(
"--rc-messages-per-second", type=float, default=0.2,
help="number of messages a client can send per second"
)
rc_group.add_argument(
"--rc-message-burst-count", type=float, default=10,
help="number of message a client can send before being throttled"
)
self.federation_rc_window_size = config["federation_rc_window_size"]
self.federation_rc_sleep_limit = config["federation_rc_sleep_limit"]
self.federation_rc_sleep_delay = config["federation_rc_sleep_delay"]
self.federation_rc_reject_limit = config["federation_rc_reject_limit"]
self.federation_rc_concurrent = config["federation_rc_concurrent"]
def default_config(self, config_dir_path, server_name):
return """\
## Ratelimiting ##
# Number of messages a client can send per second
rc_messages_per_second: 0.2
# Number of messages a client can send before being throttled
rc_message_burst_count: 10.0
# The federation window size in milliseconds
federation_rc_window_size: 1000
# The number of federation requests from a single server in a window
# before the server will delay processing the request.
federation_rc_sleep_limit: 10
# The duration in milliseconds to delay processing events from
# remote servers by if they go over the sleep limit.
federation_rc_sleep_delay: 500
# The maximum number of concurrent federation requests allowed
# from a single server
federation_rc_reject_limit: 50
# The number of federation requests to concurrently process from a
# single server
federation_rc_concurrent: 3
"""

View File

@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
from synapse.util.stringutils import random_string_with_symbols
from distutils.util import strtobool
class RegistrationConfig(Config):
def read_config(self, config):
self.disable_registration = not bool(
strtobool(str(config["enable_registration"]))
)
if "disable_registration" in config:
self.disable_registration = bool(
strtobool(str(config["disable_registration"]))
)
self.registration_shared_secret = config.get("registration_shared_secret")
def default_config(self, config_dir, server_name):
registration_shared_secret = random_string_with_symbols(50)
return """\
## Registration ##
# Enable registration for new users.
enable_registration: False
# If set, allows registration by anyone who also has the shared
# secret, even if registration is otherwise disabled.
registration_shared_secret: "%(registration_shared_secret)s"
""" % locals()
def add_arguments(self, parser):
reg_group = parser.add_argument_group("registration")
reg_group.add_argument(
"--enable-registration", action="store_true", default=None,
help="Enable registration for new users."
)
def read_arguments(self, args):
if args.enable_registration is not None:
self.disable_registration = not bool(
strtobool(str(args.enable_registration))
)
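The precedence here is subtle: enable_registration supplies the default, a legacy disable_registration key overrides it, and --enable-registration on the command line wins last. A small sketch, assuming the config object can be built bare:

reg = RegistrationConfig()

reg.read_config({"enable_registration": False})
assert reg.disable_registration is True

# A legacy disable_registration key takes precedence when present:
reg.read_config({"enable_registration": False, "disable_registration": False})
assert reg.disable_registration is False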

View File

@@ -17,32 +17,25 @@ from ._base import Config
class ContentRepositoryConfig(Config):
def __init__(self, args):
super(ContentRepositoryConfig, self).__init__(args)
self.max_upload_size = self.parse_size(args.max_upload_size)
self.max_image_pixels = self.parse_size(args.max_image_pixels)
self.media_store_path = self.ensure_directory(args.media_store_path)
def read_config(self, config):
self.max_upload_size = self.parse_size(config["max_upload_size"])
self.max_image_pixels = self.parse_size(config["max_image_pixels"])
self.media_store_path = self.ensure_directory(config["media_store_path"])
self.uploads_path = self.ensure_directory(config["uploads_path"])
def parse_size(self, string):
sizes = {"K": 1024, "M": 1024 * 1024}
size = 1
suffix = string[-1]
if suffix in sizes:
string = string[:-1]
size = sizes[suffix]
return int(string) * size
def default_config(self, config_dir_path, server_name):
media_store = self.default_path("media_store")
uploads_path = self.default_path("uploads")
return """
# Directory where uploaded images and attachments are stored.
media_store_path: "%(media_store)s"
@classmethod
def add_arguments(cls, parser):
super(ContentRepositoryConfig, cls).add_arguments(parser)
db_group = parser.add_argument_group("content_repository")
db_group.add_argument(
"--max-upload-size", default="10M"
)
db_group.add_argument(
"--media-store-path", default=cls.default_path("media_store")
)
db_group.add_argument(
"--max-image-pixels", default="32M",
help="Maximum number of pixels that will be thumbnailed"
)
# Directory where in-progress uploads are stored.
uploads_path: "%(uploads_path)s"
# The largest allowed upload size in bytes
max_upload_size: "10M"
# Maximum number of pixels that will be thumbnailed
max_image_pixels: "32M"
""" % locals()

synapse/config/saml2.py (new file)
View File

@@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class SAML2Config(Config):
"""SAML2 Configuration
Synapse uses pysaml2 libraries for providing SAML2 support
config_path: Path to the sp_conf.py configuration file
idp_redirect_url: Identity provider URL which will redirect
the user back to /login/saml2 with proper info.
sp_conf.py file is something like:
https://github.com/rohe/pysaml2/blob/master/example/sp-repoze/sp_conf.py.example
More information: https://pythonhosted.org/pysaml2/howto/config.html
"""
def read_config(self, config):
saml2_config = config.get("saml2_config", None)
if saml2_config:
self.saml2_enabled = True
self.saml2_config_path = saml2_config["config_path"]
self.saml2_idp_redirect_url = saml2_config["idp_redirect_url"]
else:
self.saml2_enabled = False
self.saml2_config_path = None
self.saml2_idp_redirect_url = None
def default_config(self, config_dir_path, server_name):
return """
# Enable SAML2 for registration and login. Uses pysaml2
# config_path: Path to the sp_conf.py configuration file
# idp_redirect_url: Identity provider URL which will redirect
# the user back to /login/saml2 with proper info.
# See pysaml2 docs for format of config.
#saml2_config:
# config_path: "%s/sp_conf.py"
# idp_redirect_url: "http://%s/idp"
""" % (config_dir_path, server_name)

View File

@@ -13,110 +13,215 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ._base import Config, ConfigError
import syutil.crypto.signing_key
from ._base import Config
class ServerConfig(Config):
def __init__(self, args):
super(ServerConfig, self).__init__(args)
self.server_name = args.server_name
self.signing_key = self.read_signing_key(args.signing_key_path)
self.bind_port = args.bind_port
self.bind_host = args.bind_host
self.unsecure_port = args.unsecure_port
self.daemonize = args.daemonize
self.pid_file = self.abspath(args.pid_file)
self.webclient = True
self.manhole = args.manhole
self.no_tls = args.no_tls
if not args.content_addr:
host = args.server_name
def read_config(self, config):
self.server_name = config["server_name"]
self.pid_file = self.abspath(config.get("pid_file"))
self.web_client = config["web_client"]
self.soft_file_limit = config["soft_file_limit"]
self.daemonize = config.get("daemonize")
self.print_pidfile = config.get("print_pidfile")
self.use_frozen_dicts = config.get("use_frozen_dicts", True)
self.listeners = config.get("listeners", [])
bind_port = config.get("bind_port")
if bind_port:
self.listeners = []
bind_host = config.get("bind_host", "")
gzip_responses = config.get("gzip_responses", True)
names = ["client", "webclient"] if self.web_client else ["client"]
self.listeners.append({
"port": bind_port,
"bind_address": bind_host,
"tls": True,
"type": "http",
"resources": [
{
"names": names,
"compress": gzip_responses,
},
{
"names": ["federation"],
"compress": False,
}
]
})
unsecure_port = config.get("unsecure_port", bind_port - 400)
if unsecure_port:
self.listeners.append({
"port": unsecure_port,
"bind_address": bind_host,
"tls": False,
"type": "http",
"resources": [
{
"names": names,
"compress": gzip_responses,
},
{
"names": ["federation"],
"compress": False,
}
]
})
manhole = config.get("manhole")
if manhole:
self.listeners.append({
"port": manhole,
"bind_address": "127.0.0.1",
"type": "manhole",
})
metrics_port = config.get("metrics_port")
if metrics_port:
self.listeners.append({
"port": metrics_port,
"bind_address": config.get("metrics_bind_host", "127.0.0.1"),
"tls": False,
"type": "http",
"resources": [
{
"names": ["metrics"],
"compress": False,
},
]
})
# Attempt to guess the content_addr for the v0 content repository
content_addr = config.get("content_addr")
if not content_addr:
for listener in self.listeners:
if listener["type"] == "http" and not listener.get("tls", False):
unsecure_port = listener["port"]
break
else:
raise RuntimeError("Could not determine 'content_addr'")
host = self.server_name
if ':' not in host:
host = "%s:%d" % (host, args.unsecure_port)
host = "%s:%d" % (host, unsecure_port)
else:
host = host.split(':')[0]
host = "%s:%d" % (host, args.unsecure_port)
args.content_addr = "http://%s" % (host,)
host = "%s:%d" % (host, unsecure_port)
content_addr = "http://%s" % (host,)
self.content_addr = args.content_addr
self.content_addr = content_addr
@classmethod
def add_arguments(cls, parser):
super(ServerConfig, cls).add_arguments(parser)
def default_config(self, config_dir_path, server_name):
if ":" in server_name:
bind_port = int(server_name.split(":")[1])
unsecure_port = bind_port - 400
else:
bind_port = 8448
unsecure_port = 8008
pid_file = self.abspath("homeserver.pid")
return """\
## Server ##
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
server_name: "%(server_name)s"
# When running as a daemon, the file to store the pid in
pid_file: %(pid_file)s
# Whether to serve a web client from the HTTP/HTTPS root resource.
web_client: True
# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
# hard limit.
soft_file_limit: 0
# List of ports that Synapse should listen on, their purpose and their
# configuration.
listeners:
# Main HTTPS listener
# For when matrix traffic is sent directly to synapse.
-
# The port to listen for HTTPS requests on.
port: %(bind_port)s
# Local interface to listen on.
# The empty string will cause synapse to listen on all interfaces.
bind_address: ''
# This is a 'http' listener, allows us to specify 'resources'.
type: http
tls: true
# Use the X-Forwarded-For (XFF) header as the client IP and not the
# actual client IP.
x_forwarded: false
# List of HTTP resources to serve on this listener.
resources:
-
# List of resources to host on this listener.
names:
- client # The client-server APIs, both v1 and v2
- webclient # The bundled webclient.
# Should synapse compress HTTP responses to clients that support it?
# This should be disabled if running synapse behind a load balancer
# that can do automatic compression.
compress: true
- names: [federation] # Federation APIs
compress: false
# Unsecure HTTP listener,
# For when matrix traffic passes through a loadbalancer that unwraps TLS.
- port: %(unsecure_port)s
tls: false
bind_address: ''
type: http
x_forwarded: false
resources:
- names: [client, webclient]
compress: true
- names: [federation]
compress: false
# Turn on the twisted telnet manhole service on localhost on the given
# port.
# - port: 9000
# bind_address: 127.0.0.1
# type: manhole
""" % locals()
def read_arguments(self, args):
if args.manhole is not None:
self.manhole = args.manhole
if args.daemonize is not None:
self.daemonize = args.daemonize
if args.print_pidfile is not None:
self.print_pidfile = args.print_pidfile
def add_arguments(self, parser):
server_group = parser.add_argument_group("server")
server_group.add_argument(
"-H", "--server-name", default="localhost",
help="The domain name of the server, with optional explicit port. "
"This is used by remote servers to connect to this server, "
"e.g. matrix.org, localhost:8080, etc."
)
server_group.add_argument("--signing-key-path",
help="The signing key to sign messages with")
server_group.add_argument("-p", "--bind-port", metavar="PORT",
type=int, help="https port to listen on",
default=8448)
server_group.add_argument("--unsecure-port", metavar="PORT",
type=int, help="http port to listen on",
default=8008)
server_group.add_argument("--bind-host", default="",
help="Local interface to listen on")
server_group.add_argument("-D", "--daemonize", action='store_true',
default=None,
help="Daemonize the home server")
server_group.add_argument('--pid-file', default="homeserver.pid",
help="When running as a daemon, the file to"
" store the pid in")
server_group.add_argument("--print-pidfile", action='store_true',
default=None,
help="Print the path to the pidfile just"
" before daemonizing")
server_group.add_argument("--manhole", metavar="PORT", dest="manhole",
type=int,
help="Turn on the twisted telnet manhole"
" service on the given port.")
server_group.add_argument("--content-addr", default=None,
help="The host and scheme to use for the "
"content repository")
server_group.add_argument("--no-tls", action='store_true',
help="Don't bind to the https port.")
def read_signing_key(self, signing_key_path):
signing_keys = self.read_file(signing_key_path, "signing_key")
try:
return syutil.crypto.signing_key.read_signing_keys(
signing_keys.splitlines(True)
)
except Exception:
raise ConfigError(
"Error reading signing_key."
" Try running again with --generate-config"
)
@classmethod
def generate_config(cls, args, config_dir_path):
super(ServerConfig, cls).generate_config(args, config_dir_path)
base_key_name = os.path.join(config_dir_path, args.server_name)
args.pid_file = os.path.abspath(args.pid_file)
if not args.signing_key_path:
args.signing_key_path = base_key_name + ".signing.key"
if not os.path.exists(args.signing_key_path):
with open(args.signing_key_path, "w") as signing_key_file:
syutil.crypto.signing_key.write_signing_keys(
signing_key_file,
(syutil.crypto.signing_key.generate_signing_key("auto"),),
)
else:
signing_keys = cls.read_file(args.signing_key_path, "signing_key")
if len(signing_keys.split("\n")[0].split()) == 1:
# handle keys in the old format.
key = syutil.crypto.signing_key.decode_signing_key_base64(
syutil.crypto.signing_key.NACL_ED25519,
"auto",
signing_keys.split("\n")[0]
)
with open(args.signing_key_path, "w") as signing_key_file:
syutil.crypto.signing_key.write_signing_keys(
signing_key_file,
(key,),
)

View File

@@ -23,28 +23,49 @@ GENERATE_DH_PARAMS = False
class TlsConfig(Config):
def __init__(self, args):
super(TlsConfig, self).__init__(args)
def read_config(self, config):
self.tls_certificate = self.read_tls_certificate(
args.tls_certificate_path
config.get("tls_certificate_path")
)
self.tls_certificate_file = config.get("tls_certificate_path")
self.no_tls = config.get("no_tls", False)
if self.no_tls:
self.tls_private_key = None
else:
self.tls_private_key = self.read_tls_private_key(
args.tls_private_key_path
)
self.tls_dh_params_path = self.check_file(
args.tls_dh_params_path, "tls_dh_params"
config.get("tls_private_key_path")
)
@classmethod
def add_arguments(cls, parser):
super(TlsConfig, cls).add_arguments(parser)
tls_group = parser.add_argument_group("tls")
tls_group.add_argument("--tls-certificate-path",
help="PEM encoded X509 certificate for TLS")
tls_group.add_argument("--tls-private-key-path",
help="PEM encoded private key for TLS")
tls_group.add_argument("--tls-dh-params-path",
help="PEM dh parameters for ephemeral keys")
self.tls_dh_params_path = self.check_file(
config.get("tls_dh_params_path"), "tls_dh_params"
)
def default_config(self, config_dir_path, server_name):
base_key_name = os.path.join(config_dir_path, server_name)
tls_certificate_path = base_key_name + ".tls.crt"
tls_private_key_path = base_key_name + ".tls.key"
tls_dh_params_path = base_key_name + ".tls.dh"
return """\
# PEM encoded X509 certificate for TLS.
# You can replace the self-signed certificate that synapse
# autogenerates on launch with your own SSL certificate + key pair
# if you like. Any required intermediary certificates can be
# appended after the primary certificate in hierarchical order.
tls_certificate_path: "%(tls_certificate_path)s"
# PEM encoded private key for TLS
tls_private_key_path: "%(tls_private_key_path)s"
# PEM dh parameters for ephemeral keys
tls_dh_params_path: "%(tls_dh_params_path)s"
# Don't bind to the https port
no_tls: False
""" % locals()
def read_tls_certificate(self, cert_path):
cert_pem = self.read_file(cert_path, "tls_certificate")
@@ -54,22 +75,13 @@ class TlsConfig(Config):
private_key_pem = self.read_file(private_key_path, "tls_private_key")
return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
@classmethod
def generate_config(cls, args, config_dir_path):
super(TlsConfig, cls).generate_config(args, config_dir_path)
base_key_name = os.path.join(config_dir_path, args.server_name)
def generate_files(self, config):
tls_certificate_path = config["tls_certificate_path"]
tls_private_key_path = config["tls_private_key_path"]
tls_dh_params_path = config["tls_dh_params_path"]
if args.tls_certificate_path is None:
args.tls_certificate_path = base_key_name + ".tls.crt"
if args.tls_private_key_path is None:
args.tls_private_key_path = base_key_name + ".tls.key"
if args.tls_dh_params_path is None:
args.tls_dh_params_path = base_key_name + ".tls.dh"
if not os.path.exists(args.tls_private_key_path):
with open(args.tls_private_key_path, "w") as private_key_file:
if not os.path.exists(tls_private_key_path):
with open(tls_private_key_path, "w") as private_key_file:
tls_private_key = crypto.PKey()
tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
private_key_pem = crypto.dump_privatekey(
@@ -77,17 +89,17 @@ class TlsConfig(Config):
)
private_key_file.write(private_key_pem)
else:
with open(args.tls_private_key_path) as private_key_file:
with open(tls_private_key_path) as private_key_file:
private_key_pem = private_key_file.read()
tls_private_key = crypto.load_privatekey(
crypto.FILETYPE_PEM, private_key_pem
)
if not os.path.exists(args.tls_certificate_path):
with open(args.tls_certificate_path, "w") as certifcate_file:
if not os.path.exists(tls_certificate_path):
with open(tls_certificate_path, "w") as certificate_file:
cert = crypto.X509()
subject = cert.get_subject()
subject.CN = args.server_name
subject.CN = config["server_name"]
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
@@ -99,18 +111,18 @@ class TlsConfig(Config):
cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
certifcate_file.write(cert_pem)
certificate_file.write(cert_pem)
if not os.path.exists(args.tls_dh_params_path):
if not os.path.exists(tls_dh_params_path):
if GENERATE_DH_PARAMS:
subprocess.check_call([
"openssl", "dhparam",
"-outform", "PEM",
"-out", args.tls_dh_params_path,
"-out", tls_dh_params_path,
"2048"
])
else:
with open(args.tls_dh_params_path, "w") as dh_params_file:
with open(tls_dh_params_path, "w") as dh_params_file:
dh_params_file.write(
"2048-bit DH parameters taken from rfc3526\n"
"-----BEGIN DH PARAMETERS-----\n"

View File

@@ -17,28 +17,21 @@ from ._base import Config
class VoipConfig(Config):
def __init__(self, args):
super(VoipConfig, self).__init__(args)
self.turn_uris = args.turn_uris
self.turn_shared_secret = args.turn_shared_secret
self.turn_user_lifetime = args.turn_user_lifetime
def read_config(self, config):
self.turn_uris = config.get("turn_uris", [])
self.turn_shared_secret = config["turn_shared_secret"]
self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
@classmethod
def add_arguments(cls, parser):
super(VoipConfig, cls).add_arguments(parser)
group = parser.add_argument_group("voip")
group.add_argument(
"--turn-uris", type=str, default=None,
help="The public URIs of the TURN server to give to clients"
)
group.add_argument(
"--turn-shared-secret", type=str, default=None,
help=(
"The shared secret used to compute passwords for the TURN"
" server"
)
)
group.add_argument(
"--turn-user-lifetime", type=int, default=(1000 * 60 * 60),
help="How long generated TURN credentials last, in ms"
)
def default_config(self, config_dir_path, server_name):
return """\
## Turn ##
# The public URIs of the TURN server to give to clients
turn_uris: []
# The shared secret used to compute passwords for the TURN server
turn_shared_secret: "YOUR_SHARED_SECRET"
# How long generated TURN credentials last
turn_user_lifetime: "1h"
"""

View File

@@ -35,10 +35,13 @@ class ServerContextFactory(ssl.ContextFactory):
_ecCurve = _OpenSSLECCurve(_defaultCurveName)
_ecCurve.addECKeyToContext(context)
except:
logger.exception("Failed to enable eliptic curve for TLS")
logger.exception("Failed to enable elliptic curve for TLS")
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate(config.tls_certificate)
context.use_certificate_chain_file(config.tls_certificate_file)
if not config.no_tls:
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")

View File

@@ -18,37 +18,51 @@ from twisted.web.http import HTTPClient
from twisted.internet.protocol import Factory
from twisted.internet import defer, reactor
from synapse.http.endpoint import matrix_federation_endpoint
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logcontext import (
preserve_context_over_fn, preserve_context_over_deferred
)
import simplejson as json
import logging
logger = logging.getLogger(__name__)
KEY_API_V1 = b"/_matrix/key/v1/"
@defer.inlineCallbacks
def fetch_server_key(server_name, ssl_context_factory):
def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
"""Fetch the keys for a remote server."""
factory = SynapseKeyClientFactory()
factory.path = path
endpoint = matrix_federation_endpoint(
reactor, server_name, ssl_context_factory, timeout=30
)
for i in range(5):
try:
with PreserveLoggingContext():
protocol = yield endpoint.connect(factory)
server_response, server_certificate = yield protocol.remote_key
protocol = yield preserve_context_over_fn(
endpoint.connect, factory
)
server_response, server_certificate = yield preserve_context_over_deferred(
protocol.remote_key
)
defer.returnValue((server_response, server_certificate))
return
except SynapseKeyClientError as e:
logger.exception("Error getting key for %r" % (server_name,))
if e.status.startswith("4"):
# Don't retry for 4xx responses.
raise IOError("Cannot get key for %r" % server_name)
except Exception as e:
logger.exception(e)
raise IOError("Cannot get key for %s" % server_name)
raise IOError("Cannot get key for %r" % server_name)
class SynapseKeyClientError(Exception):
"""The key wasn't retrieved from the remote server."""
status = None
pass
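fetch_server_key retries up to five times but treats any 4xx status as permanent and raises immediately. The retry policy in isolation (exception type and fetcher are invented names):

class KeyFetchError(Exception):
    def __init__(self, status):
        super(KeyFetchError, self).__init__(status)
        self.status = status

def fetch_with_retries(fetch, attempts=5):
    # Retry transient failures; give up early on 4xx, which won't improve.
    for _ in range(attempts):
        try:
            return fetch()
        except KeyFetchError as e:
            if str(e.status).startswith("4"):
                raise IOError("permanent failure: %s" % (e.status,))
    raise IOError("gave up after %d attempts" % (attempts,))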
@@ -66,17 +80,30 @@ class SynapseKeyClientProtocol(HTTPClient):
def connectionMade(self):
self.host = self.transport.getHost()
logger.debug("Connected to %s", self.host)
self.sendCommand(b"GET", b"/_matrix/key/v1/")
self.sendCommand(b"GET", self.path)
self.endHeaders()
self.timer = reactor.callLater(
self.timeout,
self.on_timeout
)
def errback(self, error):
if not self.remote_key.called:
self.remote_key.errback(error)
def callback(self, result):
if not self.remote_key.called:
self.remote_key.callback(result)
def handleStatus(self, version, status, message):
if status != b"200":
# logger.info("Non-200 response from %s: %s %s",
# self.transport.getHost(), status, message)
error = SynapseKeyClientError(
"Non-200 response %r from %r" % (status, self.host)
)
error.status = status
self.errback(error)
self.transport.abortConnection()
def handleResponse(self, response_body_bytes):
@@ -89,15 +116,18 @@ class SynapseKeyClientProtocol(HTTPClient):
return
certificate = self.transport.getPeerCertificate()
self.remote_key.callback((json_response, certificate))
self.callback((json_response, certificate))
self.transport.abortConnection()
self.timer.cancel()
def on_timeout(self):
logger.debug("Timeout waiting for response from %s", self.host)
self.remote_key.errback(IOError("Timeout waiting for response"))
self.errback(IOError("Timeout waiting for response"))
self.transport.abortConnection()
class SynapseKeyClientFactory(Factory):
protocol = SynapseKeyClientProtocol
def protocol(self):
protocol = SynapseKeyClientProtocol()
protocol.path = self.path
return protocol
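The callback()/errback() wrappers check remote_key.called because the response handler, the timeout, and connection teardown can race to resolve the same deferred, and firing a Deferred twice raises AlreadyCalledError. The guard on its own:

from twisted.internet import defer

d = defer.Deferred()

def fire_once(result):
    # Without the .called check the second call would raise AlreadyCalledError.
    if not d.called:
        d.callback(result)

fire_once("from handleResponse")
fire_once("from on_timeout")  # ignored: the deferred already fired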


@@ -15,7 +15,9 @@
from synapse.crypto.keyclient import fetch_server_key
from twisted.internet import defer
from syutil.crypto.jsonsign import verify_signed_json, signature_ids
from syutil.crypto.jsonsign import (
verify_signed_json, signature_ids, sign_json, encode_canonical_json
)
from syutil.crypto.signing_key import (
is_signing_algorithm_supported, decode_verify_key_bytes
)
@@ -23,45 +25,107 @@ from syutil.base64util import decode_base64, encode_base64
from synapse.api.errors import SynapseError, Codes
from synapse.util.retryutils import get_retry_limiter
from synapse.util import unwrapFirstError
from synapse.util.async import ObservableDeferred
from OpenSSL import crypto
from collections import namedtuple
import urllib
import hashlib
import logging
logger = logging.getLogger(__name__)
KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
class Keyring(object):
def __init__(self, hs):
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.client = hs.get_http_client()
self.config = hs.get_config()
self.perspective_servers = self.config.perspectives
self.hs = hs
@defer.inlineCallbacks
self.key_downloads = {}
def verify_json_for_server(self, server_name, json_object):
return self.verify_json_objects_for_server(
[(server_name, json_object)]
)[0]
def verify_json_objects_for_server(self, server_and_json):
"""Bulk verfies signatures of json objects, bulk fetching keys as
necessary.
Args:
server_and_json (list): List of pairs of (server_name, json_object)
Returns:
list of deferreds indicating success or failure to verify each
json object's signature for the given server_name.
"""
group_id_to_json = {}
group_id_to_group = {}
group_ids = []
next_group_id = 0
deferreds = {}
for server_name, json_object in server_and_json:
logger.debug("Verifying for %s", server_name)
group_id = next_group_id
next_group_id += 1
group_ids.append(group_id)
key_ids = signature_ids(json_object, server_name)
if not key_ids:
raise SynapseError(
deferreds[group_id] = defer.fail(SynapseError(
400,
"Not signed with a supported algorithm",
Codes.UNAUTHORIZED,
)
))
else:
deferreds[group_id] = defer.Deferred()
group = KeyGroup(server_name, group_id, key_ids)
group_id_to_group[group_id] = group
group_id_to_json[group_id] = json_object
@defer.inlineCallbacks
def handle_key_deferred(group, deferred):
server_name = group.server_name
try:
verify_key = yield self.get_server_verify_key(server_name, key_ids)
except IOError:
_, _, key_id, verify_key = yield deferred
except IOError as e:
logger.warn(
"Got IOError when downloading keys for %s: %s %s",
server_name, type(e).__name__, str(e.message),
)
raise SynapseError(
502,
"Error downloading keys for %s" % (server_name,),
Codes.UNAUTHORIZED,
)
except:
except Exception as e:
logger.exception(
"Got Exception when downloading keys for %s: %s %s",
server_name, type(e).__name__, str(e.message),
)
raise SynapseError(
401,
"No key for %s with id %s" % (server_name, key_ids),
Codes.UNAUTHORIZED,
)
json_object = group_id_to_json[group.group_id]
try:
verify_signed_json(json_object, server_name, verify_key)
except:
@@ -73,30 +137,465 @@ class Keyring(object):
Codes.UNAUTHORIZED,
)
server_to_deferred = {
server_name: defer.Deferred()
for server_name, _ in server_and_json
}
# We want to wait for any previous lookups to complete before
# proceeding.
wait_on_deferred = self.wait_for_previous_lookups(
[server_name for server_name, _ in server_and_json],
server_to_deferred,
)
# Actually start fetching keys.
wait_on_deferred.addBoth(
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
)
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
server_to_gids = {}
def remove_deferreds(res, server_name, group_id):
server_to_gids[server_name].discard(group_id)
if not server_to_gids[server_name]:
server_to_deferred.pop(server_name).callback(None)
return res
for g_id, deferred in deferreds.items():
server_name = group_id_to_group[g_id].server_name
server_to_gids.setdefault(server_name, set()).add(g_id)
deferred.addBoth(remove_deferreds, server_name, g_id)
# Pass those keys to handle_key_deferred so that the json object
# signatures can be verified
return [
handle_key_deferred(
group_id_to_group[g_id],
deferreds[g_id],
)
for g_id in group_ids
]
@defer.inlineCallbacks
def get_server_verify_key(self, server_name, key_ids):
def wait_for_previous_lookups(self, server_names, server_to_deferred):
"""Waits for any previous key lookups for the given servers to finish.
Args:
server_names (list): list of server_names we want to look up
server_to_deferred (dict): server_name to deferred which gets
resolved once we've finished looking up keys for that server
"""
while True:
wait_on = [
self.key_downloads[server_name]
for server_name in server_names
if server_name in self.key_downloads
]
if wait_on:
yield defer.DeferredList(wait_on)
else:
break
for server_name, deferred in server_to_deferred.items():
self.key_downloads[server_name] = ObservableDeferred(deferred)
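wait_for_previous_lookups is what stops two overlapping verification batches from hammering the same server: a new batch waits for any in-flight download, then registers its own deferred. A simplified single-server variant of that pattern (names assumed; the real code wraps the deferreds in ObservableDeferred so multiple waiters can observe one download):

from twisted.internet import defer

key_downloads = {}  # server_name -> Deferred for the in-flight lookup

@defer.inlineCallbacks
def lookup_coalesced(server_name, do_lookup):
    # Wait for any lookup already running for this server before starting ours.
    previous = key_downloads.get(server_name)
    if previous is not None:
        yield previous.addBoth(lambda _: None)  # wait, ignoring its outcome
    d = defer.maybeDeferred(do_lookup, server_name)
    key_downloads[server_name] = d
    try:
        result = yield d
    finally:
        key_downloads.pop(server_name, None)
    defer.returnValue(result)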
def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
"""Takes a dict of KeyGroups and tries to find at least one key for
each group.
"""
# These are functions that produce keys given a list of key ids
key_fetch_fns = (
self.get_keys_from_store, # First try the local store
self.get_keys_from_perspectives, # Then try via perspectives
self.get_keys_from_server, # Then try directly
)
@defer.inlineCallbacks
def do_iterations():
merged_results = {}
missing_keys = {
group.server_name: key_id
for group in group_id_to_group.values()
for key_id in group.key_ids
}
for fn in key_fetch_fns:
results = yield fn(missing_keys.items())
merged_results.update(results)
# We now need to figure out which groups we have keys for
# and which we don't
missing_groups = {}
for group in group_id_to_group.values():
for key_id in group.key_ids:
if key_id in merged_results[group.server_name]:
group_id_to_deferred[group.group_id].callback((
group.group_id,
group.server_name,
key_id,
merged_results[group.server_name][key_id],
))
break
else:
missing_groups.setdefault(
group.server_name, []
).append(group)
if not missing_groups:
break
missing_keys = {
server_name: set(
key_id for group in groups for key_id in group.key_ids
)
for server_name, groups in missing_groups.items()
}
for groups in missing_groups.values():
for group in groups:
group_id_to_deferred[group.group_id].errback(SynapseError(
401,
"No key for %s with id %s" % (
group.server_name, group.key_ids,
),
Codes.UNAUTHORIZED,
))
def on_err(err):
for deferred in group_id_to_deferred.values():
if not deferred.called:
deferred.errback(err)
do_iterations().addErrback(on_err)
return group_id_to_deferred
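do_iterations walks key_fetch_fns in order (local store, then perspectives, then direct fetch), after each pass recomputing which keys are still missing so that later, more expensive sources are only asked for the remainder. A synchronous sketch of that loop (the fetcher callables are made up):

def fetch_with_fallbacks(wanted, fetchers):
    """wanted: {server_name: set(key_ids)} still needed.
    fetchers: callables taking such a dict and returning
    {server_name: {key_id: key}} for whatever they could find."""
    found = {}
    for fetch in fetchers:
        for server, keys in fetch(wanted).items():
            found.setdefault(server, {}).update(keys)
        # Recompute the shortfall before consulting the next (costlier) source.
        wanted = {
            server: set(k for k in key_ids if k not in found.get(server, {}))
            for server, key_ids in wanted.items()
        }
        wanted = dict((s, ks) for s, ks in wanted.items() if ks)
        if not wanted:
            break
    return found, wanted  # any leftovers in `wanted` could not be fetched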
@defer.inlineCallbacks
def get_keys_from_store(self, server_name_and_key_ids):
res = yield defer.gatherResults(
[
self.store.get_server_verify_keys(server_name, key_ids)
for server_name, key_ids in server_name_and_key_ids
],
consumeErrors=True,
).addErrback(unwrapFirstError)
defer.returnValue(dict(zip(
[server_name for server_name, _ in server_name_and_key_ids],
res
)))
@defer.inlineCallbacks
def get_keys_from_perspectives(self, server_name_and_key_ids):
@defer.inlineCallbacks
def get_key(perspective_name, perspective_keys):
try:
result = yield self.get_server_verify_key_v2_indirect(
server_name_and_key_ids, perspective_name, perspective_keys
)
defer.returnValue(result)
except Exception as e:
logger.exception(
"Unable to get key from %r: %s %s",
perspective_name,
type(e).__name__, str(e.message),
)
defer.returnValue({})
results = yield defer.gatherResults(
[
get_key(p_name, p_keys)
for p_name, p_keys in self.perspective_servers.items()
],
consumeErrors=True,
).addErrback(unwrapFirstError)
union_of_keys = {}
for result in results:
for server_name, keys in result.items():
union_of_keys.setdefault(server_name, {}).update(keys)
defer.returnValue(union_of_keys)
@defer.inlineCallbacks
def get_keys_from_server(self, server_name_and_key_ids):
@defer.inlineCallbacks
def get_key(server_name, key_ids):
limiter = yield get_retry_limiter(
server_name,
self.clock,
self.store,
)
with limiter:
keys = None
try:
keys = yield self.get_server_verify_key_v2_direct(
server_name, key_ids
)
except Exception as e:
logger.info(
"Unable to getting key %r for %r directly: %s %s",
key_ids, server_name,
type(e).__name__, str(e.message),
)
if not keys:
keys = yield self.get_server_verify_key_v1_direct(
server_name, key_ids
)
keys = {server_name: keys}
defer.returnValue(keys)
results = yield defer.gatherResults(
[
get_key(server_name, key_ids)
for server_name, key_ids in server_name_and_key_ids
],
consumeErrors=True,
).addErrback(unwrapFirstError)
merged = {}
for result in results:
merged.update(result)
defer.returnValue({
server_name: keys
for server_name, keys in merged.items()
if keys
})
@defer.inlineCallbacks
def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
perspective_name,
perspective_keys):
limiter = yield get_retry_limiter(
perspective_name, self.clock, self.store
)
with limiter:
# TODO(mark): Set the minimum_valid_until_ts to that needed by
# the events being validated or the current time if validating
# an incoming request.
query_response = yield self.client.post_json(
destination=perspective_name,
path=b"/_matrix/key/v2/query",
data={
u"server_keys": {
server_name: {
key_id: {
u"minimum_valid_until_ts": 0
} for key_id in key_ids
}
for server_name, key_ids in server_names_and_key_ids
}
},
)
keys = {}
responses = query_response["server_keys"]
for response in responses:
if (u"signatures" not in response
or perspective_name not in response[u"signatures"]):
raise ValueError(
"Key response not signed by perspective server"
" %r" % (perspective_name,)
)
verified = False
for key_id in response[u"signatures"][perspective_name]:
if key_id in perspective_keys:
verify_signed_json(
response,
perspective_name,
perspective_keys[key_id]
)
verified = True
if not verified:
logger.info(
"Response from perspective server %r not signed with a"
" known key, signed with: %r, known keys: %r",
perspective_name,
list(response[u"signatures"][perspective_name]),
list(perspective_keys)
)
raise ValueError(
"Response not signed with a known key for perspective"
" server %r" % (perspective_name,)
)
processed_response = yield self.process_v2_response(
perspective_name, response
)
for server_name, response_keys in processed_response.items():
keys.setdefault(server_name, {}).update(response_keys)
yield defer.gatherResults(
[
self.store_keys(
server_name=server_name,
from_server=perspective_name,
verify_keys=response_keys,
)
for server_name, response_keys in keys.items()
],
consumeErrors=True
).addErrback(unwrapFirstError)
defer.returnValue(keys)
@defer.inlineCallbacks
def get_server_verify_key_v2_direct(self, server_name, key_ids):
keys = {}
for requested_key_id in key_ids:
if requested_key_id in keys:
continue
(response, tls_certificate) = yield fetch_server_key(
server_name, self.hs.tls_context_factory,
path=(b"/_matrix/key/v2/server/%s" % (
urllib.quote(requested_key_id),
)).encode("ascii"),
)
if (u"signatures" not in response
or server_name not in response[u"signatures"]):
raise ValueError("Key response not signed by remote server")
if "tls_fingerprints" not in response:
raise ValueError("Key response missing TLS fingerprints")
certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1, tls_certificate
)
sha256_fingerprint = hashlib.sha256(certificate_bytes).digest()
sha256_fingerprint_b64 = encode_base64(sha256_fingerprint)
response_sha256_fingerprints = set()
for fingerprint in response[u"tls_fingerprints"]:
if u"sha256" in fingerprint:
response_sha256_fingerprints.add(fingerprint[u"sha256"])
if sha256_fingerprint_b64 not in response_sha256_fingerprints:
raise ValueError("TLS certificate not allowed by fingerprints")
response_keys = yield self.process_v2_response(
from_server=server_name,
requested_ids=[requested_key_id],
response_json=response,
)
keys.update(response_keys)
yield defer.gatherResults(
[
self.store_keys(
server_name=key_server_name,
from_server=server_name,
verify_keys=verify_keys,
)
for key_server_name, verify_keys in keys.items()
],
consumeErrors=True
).addErrback(unwrapFirstError)
defer.returnValue(keys)
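The TLS check above: DER-encode the certificate actually presented on the connection, SHA-256 it, and require the base64 digest to appear among the response's advertised sha256 fingerprints. Standalone (assuming, as syutil's encode_base64 produces, unpadded base64):

import base64
import hashlib

def cert_allowed_by_response(certificate_der, response):
    digest = hashlib.sha256(certificate_der).digest()
    fingerprint = base64.b64encode(digest).rstrip(b"=").decode("ascii")
    advertised = set(
        fp["sha256"] for fp in response.get("tls_fingerprints", [])
        if "sha256" in fp
    )
    return fingerprint in advertised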
@defer.inlineCallbacks
def process_v2_response(self, from_server, response_json,
requested_ids=[]):
time_now_ms = self.clock.time_msec()
response_keys = {}
verify_keys = {}
for key_id, key_data in response_json["verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
verify_key.time_added = time_now_ms
verify_keys[key_id] = verify_key
old_verify_keys = {}
for key_id, key_data in response_json["old_verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
verify_key.expired = key_data["expired_ts"]
verify_key.time_added = time_now_ms
old_verify_keys[key_id] = verify_key
results = {}
server_name = response_json["server_name"]
for key_id in response_json["signatures"].get(server_name, {}):
if key_id not in response_json["verify_keys"]:
raise ValueError(
"Key response must include verification keys for all"
" signatures"
)
if key_id in verify_keys:
verify_signed_json(
response_json,
server_name,
verify_keys[key_id]
)
signed_key_json = sign_json(
response_json,
self.config.server_name,
self.config.signing_key[0],
)
signed_key_json_bytes = encode_canonical_json(signed_key_json)
ts_valid_until_ms = signed_key_json[u"valid_until_ts"]
updated_key_ids = set(requested_ids)
updated_key_ids.update(verify_keys)
updated_key_ids.update(old_verify_keys)
response_keys.update(verify_keys)
response_keys.update(old_verify_keys)
yield defer.gatherResults(
[
self.store.store_server_keys_json(
server_name=server_name,
key_id=key_id,
from_server=server_name,
ts_now_ms=time_now_ms,
ts_expires_ms=ts_valid_until_ms,
key_json_bytes=signed_key_json_bytes,
)
for key_id in updated_key_ids
],
consumeErrors=True,
).addErrback(unwrapFirstError)
results[server_name] = response_keys
defer.returnValue(results)
@defer.inlineCallbacks
def get_server_verify_key_v1_direct(self, server_name, key_ids):
"""Finds a verification key for the server with one of the key ids.
Args:
server_name (str): The name of the server to fetch a key for.
key_ids (list of str): The key_ids to check for.
"""
# Check the datastore to see if we have one cached.
cached = yield self.store.get_server_verify_keys(server_name, key_ids)
if cached:
defer.returnValue(cached[0])
return
# Try to fetch the key from the remote server.
limiter = yield get_retry_limiter(
server_name,
self.clock,
self.store,
)
with limiter:
(response, tls_certificate) = yield fetch_server_key(
server_name, self.hs.tls_context_factory
)
@@ -119,11 +618,16 @@ class Keyring(object):
if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
raise ValueError("TLS certificate doesn't match")
# Cache the result in the datastore.
time_now_ms = self.clock.time_msec()
verify_keys = {}
for key_id, key_base64 in response["verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
verify_key.time_added = time_now_ms
verify_keys[key_id] = verify_key
for key_id in response["signatures"][server_name]:
@@ -139,10 +643,6 @@ class Keyring(object):
verify_keys[key_id]
)
# Cache the result in the datastore.
time_now_ms = self.clock.time_msec()
yield self.store.store_server_certificate(
server_name,
server_name,
@@ -150,14 +650,31 @@ class Keyring(object):
tls_certificate,
)
for key_id, key in verify_keys.items():
yield self.store.store_server_verify_key(
server_name, server_name, time_now_ms, key
yield self.store_keys(
server_name=server_name,
from_server=server_name,
verify_keys=verify_keys,
)
for key_id in key_ids:
if key_id in verify_keys:
defer.returnValue(verify_keys[key_id])
return
defer.returnValue(verify_keys)
raise ValueError("No verification key found for given key ids")
@defer.inlineCallbacks
def store_keys(self, server_name, from_server, verify_keys):
"""Store a collection of verify keys for a given server
Args:
server_name(str): The name of the server the keys are for.
from_server(str): The server the keys were downloaded from.
verify_keys(dict): A mapping of key_id to VerifyKey.
Returns:
A deferred that completes when the keys are stored.
"""
# TODO(markjh): Store whether the keys have expired.
yield defer.gatherResults(
[
self.store.store_server_verify_key(
server_name, server_name, key.time_added, key
)
for key_id, key in verify_keys.items()
],
consumeErrors=True,
).addErrback(unwrapFirstError)


@@ -16,6 +16,12 @@
from synapse.util.frozenutils import freeze
# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
# bugs where we accidentally share e.g. signature dicts. However, converting
# a dict to frozen_dicts is expensive.
USE_FROZEN_DICTS = True
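The trade-off in the comment is real: freezing walks every nested dict in the event, which costs CPU, but afterwards code cannot accidentally mutate a shared structure such as a signatures dict. A toy recursive freeze to illustrate the shape of the operation (not Synapse's frozenutils implementation):

def toy_freeze(value):
    # Recursively replace mutable containers with immutable stand-ins.
    # The full walk over a large event is where the expense comes from.
    if isinstance(value, dict):
        return tuple(sorted((k, toy_freeze(v)) for k, v in value.items()))
    if isinstance(value, (list, tuple)):
        return tuple(toy_freeze(v) for v in value)
    return value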
class _EventInternalMetadata(object):
def __init__(self, internal_metadata_dict):
self.__dict__ = dict(internal_metadata_dict)
@@ -46,9 +52,10 @@ def _event_dict_property(key):
class EventBase(object):
def __init__(self, event_dict, signatures={}, unsigned={},
internal_metadata_dict={}):
internal_metadata_dict={}, rejected_reason=None):
self.signatures = signatures
self.unsigned = unsigned
self.rejected_reason = rejected_reason
self._event_dict = event_dict
@@ -109,7 +116,7 @@ class EventBase(object):
class FrozenEvent(EventBase):
def __init__(self, event_dict, internal_metadata_dict={}):
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
event_dict = dict(event_dict)
# Signatures is a dict of dicts, and this is faster than doing a
@@ -121,13 +128,17 @@ class FrozenEvent(EventBase):
unsigned = dict(event_dict.pop("unsigned", {}))
if USE_FROZEN_DICTS:
frozen_dict = freeze(event_dict)
else:
frozen_dict = event_dict
super(FrozenEvent, self).__init__(
frozen_dict,
signatures=signatures,
unsigned=unsigned,
internal_metadata_dict=internal_metadata_dict,
rejected_reason=rejected_reason,
)
@staticmethod


@@ -16,8 +16,7 @@
class EventContext(object):
def __init__(self, current_state=None, auth_events=None):
def __init__(self, current_state=None):
self.current_state = current_state
self.auth_events = auth_events
self.state_group = None
self.rejected = False


@@ -74,6 +74,8 @@ def prune_event(event):
)
elif event_type == EventTypes.Aliases:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
allowed_fields = {
k: v


@@ -18,12 +18,12 @@ from twisted.internet import defer
from synapse.events.utils import prune_event
from syutil.jsonutil import encode_canonical_json
from synapse.crypto.event_signing import check_event_content_hash
from synapse.api.errors import SynapseError
from synapse.util import unwrapFirstError
import logging
@@ -32,7 +32,8 @@ logger = logging.getLogger(__name__)
class FederationBase(object):
@defer.inlineCallbacks
def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False):
def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
include_none=False):
"""Takes a list of PDUs and checks the signatures and hashs of each
one. If a PDU fails its signature check then we check if we have it in
the database and if not then request if from the originating server of
@@ -50,84 +51,108 @@ class FederationBase(object):
Returns:
Deferred : A list of PDUs that have valid signatures and hashes.
"""
deferreds = self._check_sigs_and_hashes(pdus)
signed_pdus = []
def callback(pdu):
return pdu
@defer.inlineCallbacks
def do(pdu):
try:
new_pdu = yield self._check_sigs_and_hash(pdu)
signed_pdus.append(new_pdu)
except SynapseError:
# FIXME: We should handle signature failures more gracefully.
def errback(failure, pdu):
failure.trap(SynapseError)
return None
def try_local_db(res, pdu):
if not res:
# Check local db.
new_pdu = yield self.store.get_event(
return self.store.get_event(
pdu.event_id,
allow_rejected=True,
allow_none=True,
)
if new_pdu:
signed_pdus.append(new_pdu)
return
return res
# Check pdu.origin
if pdu.origin != origin:
try:
new_pdu = yield self.get_pdu(
def try_remote(res, pdu):
if not res and pdu.origin != origin:
return self.get_pdu(
destinations=[pdu.origin],
event_id=pdu.event_id,
outlier=outlier,
)
if new_pdu:
signed_pdus.append(new_pdu)
return
except:
pass
timeout=10000,
).addErrback(lambda e: None)
return res
def warn(res, pdu):
if not res:
logger.warn(
"Failed to find copy of %s with valid signature",
pdu.event_id,
)
return res
yield defer.gatherResults(
[do(pdu) for pdu in pdus],
consumeErrors=True
for pdu, deferred in zip(pdus, deferreds):
deferred.addCallbacks(
callback, errback, errbackArgs=[pdu]
).addCallback(
try_local_db, pdu
).addCallback(
try_remote, pdu
).addCallback(
warn, pdu
)
defer.returnValue(signed_pdus)
valid_pdus = yield defer.gatherResults(
deferreds,
consumeErrors=True
).addErrback(unwrapFirstError)
if include_none:
defer.returnValue(valid_pdus)
else:
defer.returnValue([p for p in valid_pdus if p])
@defer.inlineCallbacks
def _check_sigs_and_hash(self, pdu):
"""Throws a SynapseError if the PDU does not have the correct
return self._check_sigs_and_hashes([pdu])[0]
def _check_sigs_and_hashes(self, pdus):
"""Throws a SynapseError if a PDU does not have the correct
signatures.
Returns:
FrozenEvent: Either the given event or a redacted copy of it if it failed the
content hash check.
"""
# Check signatures are correct.
redacted_event = prune_event(pdu)
redacted_pdu_json = redacted_event.get_pdu_json()
try:
yield self.keyring.verify_json_for_server(
pdu.origin, redacted_pdu_json
)
except SynapseError:
logger.warn(
"Signature check failed for %s redacted to %s",
encode_canonical_json(pdu.get_pdu_json()),
encode_canonical_json(redacted_pdu_json),
)
raise
redacted_pdus = [
prune_event(pdu)
for pdu in pdus
]
deferreds = self.keyring.verify_json_objects_for_server([
(p.origin, p.get_pdu_json())
for p in redacted_pdus
])
def callback(_, pdu, redacted):
if not check_event_content_hash(pdu):
logger.warn(
"Event content has been tampered, redacting %s, %s",
pdu.event_id, encode_canonical_json(pdu.get_dict())
"Event content has been tampered, redacting %s: %s",
pdu.event_id, pdu.get_pdu_json()
)
defer.returnValue(redacted_event)
return redacted
return pdu
defer.returnValue(pdu)
def errback(failure, pdu):
failure.trap(SynapseError)
logger.warn(
"Signature check failed for %s",
pdu.event_id,
)
return failure
for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
deferred.addCallbacks(
callback, errback,
callbackArgs=[pdu, redacted],
errbackArgs=[pdu],
)
return deferreds
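Each PDU's verification deferred therefore gets a four-stage chain: swallow the signature failure, try the local database, try re-fetching from the origin, and finally warn if nothing produced a verified copy. A condensed Twisted sketch of that chaining (the stage functions are assumed helpers):

def with_recovery(verify_d, pdu, get_local, get_remote, log):
    def swallow(failure):
        return None  # signature failed; fall through to the recovery stages

    def try_local(res):
        return res if res else get_local(pdu)

    def try_remote(res):
        return res if res else get_remote(pdu)

    def warn(res):
        if not res:
            log("no valid copy of %s found" % (pdu,))
        return res

    return verify_d.addErrback(swallow).addCallback(try_local) \
        .addCallback(try_remote).addCallback(warn)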


@@ -19,22 +19,37 @@ from twisted.internet import defer
from .federation_base import FederationBase
from .units import Edu
from synapse.api.errors import CodeMessageException, SynapseError
from synapse.util.expiringcache import ExpiringCache
from synapse.api.errors import (
CodeMessageException, HttpResponseException, SynapseError,
)
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logutils import log_function
from synapse.events import FrozenEvent
import synapse.metrics
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
import copy
import itertools
import logging
import random
logger = logging.getLogger(__name__)
# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")
sent_edus_counter = metrics.register_counter("sent_edus")
sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
class FederationClient(FederationBase):
def __init__(self):
self._get_pdu_cache = None
def start_get_pdu_cache(self):
self._get_pdu_cache = ExpiringCache(
@@ -64,6 +79,8 @@ class FederationClient(FederationBase):
order = self._order
self._order += 1
sent_pdus_destination_dist.inc_by(len(destinations))
logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)
# TODO, add errback, etc.
@@ -83,6 +100,8 @@ class FederationClient(FederationBase):
content=content,
)
sent_edus_counter.inc()
# TODO, add errback, etc.
self._transaction_queue.enqueue_edu(edu)
return defer.succeed(None)
@@ -109,10 +128,42 @@ class FederationClient(FederationBase):
a Deferred which will eventually yield a JSON object from the
response
"""
sent_queries_counter.inc(query_type)
return self.transport_layer.make_query(
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
)
@log_function
def query_client_keys(self, destination, content):
"""Query device keys for a device hosted on a remote server.
Args:
destination (str): Domain name of the remote homeserver
content (dict): The query content.
Returns:
a Deferred which will eventually yield a JSON object from the
response
"""
sent_queries_counter.inc("client_device_keys")
return self.transport_layer.query_client_keys(destination, content)
@log_function
def claim_client_keys(self, destination, content):
"""Claims one-time keys for a device hosted on a remote server.
Args:
destination (str): Domain name of the remote homeserver
content (dict): The query content.
Returns:
a Deferred which will eventually yield a JSON object from the
response
"""
sent_queries_counter.inc("client_one_time_keys")
return self.transport_layer.claim_client_keys(destination, content)
@defer.inlineCallbacks
@log_function
def backfill(self, dest, context, limit, extremities):
@@ -145,16 +196,17 @@ class FederationClient(FederationBase):
for p in transaction_data["pdus"]
]
for i, pdu in enumerate(pdus):
pdus[i] = yield self._check_sigs_and_hash(pdu)
# FIXME: We should handle signature failures more gracefully.
pdus[:] = yield defer.gatherResults(
self._check_sigs_and_hashes(pdus),
consumeErrors=True,
).addErrback(unwrapFirstError)
defer.returnValue(pdus)
@defer.inlineCallbacks
@log_function
def get_pdu(self, destinations, event_id, outlier=False):
def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -170,6 +222,8 @@ class FederationClient(FederationBase):
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
it's from an arbitrary point in the context as opposed to part
of the current block of PDUs. Defaults to `False`
timeout (int): How long to try (in ms) each destination for before
moving to the next destination. None indicates no timeout.
Returns:
Deferred: Results in the requested PDU.
@@ -193,7 +247,7 @@ class FederationClient(FederationBase):
with limiter:
transaction_data = yield self.transport_layer.get_event(
destination, event_id
destination, event_id, timeout=timeout,
)
logger.debug("transaction_data %r", transaction_data)
@@ -203,11 +257,11 @@ class FederationClient(FederationBase):
for p in transaction_data["pdus"]
]
if pdu_list:
if pdu_list and pdu_list[0]:
pdu = pdu_list[0]
# Check signatures are correct.
pdu = yield self._check_sigs_and_hash(pdu)
pdu = yield self._check_sigs_and_hashes([pdu])[0]
break
@@ -236,7 +290,7 @@ class FederationClient(FederationBase):
)
continue
if self._get_pdu_cache is not None:
if self._get_pdu_cache is not None and pdu:
self._get_pdu_cache[event_id] = pdu
defer.returnValue(pdu)
@@ -304,6 +358,9 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
def make_join(self, destinations, room_id, user_id):
for destination in destinations:
if destination == self.server_name:
continue
try:
ret = yield self.transport_layer.make_join(
destination, room_id, user_id
@@ -330,6 +387,9 @@ class FederationClient(FederationBase):
@defer.inlineCallbacks
def send_join(self, destinations, pdu):
for destination in destinations:
if destination == self.server_name:
continue
try:
time_now = self._clock.time_msec()
_, content = yield self.transport_layer.send_join(
@@ -351,13 +411,39 @@ class FederationClient(FederationBase):
for p in content.get("auth_chain", [])
]
signed_state = yield self._check_sigs_and_hash_and_fetch(
destination, state, outlier=True
pdus = {
p.event_id: p
for p in itertools.chain(state, auth_chain)
}
valid_pdus = yield self._check_sigs_and_hash_and_fetch(
destination, pdus.values(),
outlier=True,
)
signed_auth = yield self._check_sigs_and_hash_and_fetch(
destination, auth_chain, outlier=True
)
valid_pdus_map = {
p.event_id: p
for p in valid_pdus
}
# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
signed_state = [
copy.copy(valid_pdus_map[p.event_id])
for p in state
if p.event_id in valid_pdus_map
]
signed_auth = [
valid_pdus_map[p.event_id]
for p in auth_chain
if p.event_id in valid_pdus_map
]
# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
for s in signed_state:
s.internal_metadata = copy.deepcopy(s.internal_metadata)
auth_chain.sort(key=lambda e: e.depth)
@@ -369,7 +455,7 @@ class FederationClient(FederationBase):
except CodeMessageException:
raise
except Exception as e:
logger.warn(
logger.exception(
"Failed to send_join via %s: %s",
destination, e.message
)
@@ -439,6 +525,116 @@ class FederationClient(FederationBase):
defer.returnValue(ret)
@defer.inlineCallbacks
def get_missing_events(self, destination, room_id, earliest_events_ids,
latest_events, limit, min_depth):
"""Tries to fetch events we are missing. This is called when we receive
an event without having received all of its ancestors.
Args:
destination (str)
room_id (str)
earliest_events_ids (list): List of event ids. Effectively the
events we expected to receive, but haven't. `get_missing_events`
should only return events that didn't happen before these.
latest_events (list): List of events we have received that we don't
have all previous events for.
limit (int): Maximum number of events to return.
min_depth (int): Minimum depth of events to return.
"""
try:
content = yield self.transport_layer.get_missing_events(
destination=destination,
room_id=room_id,
earliest_events=earliest_events_ids,
latest_events=[e.event_id for e in latest_events],
limit=limit,
min_depth=min_depth,
)
events = [
self.event_from_pdu_json(e)
for e in content.get("events", [])
]
signed_events = yield self._check_sigs_and_hash_and_fetch(
destination, events, outlier=False
)
have_gotten_all_from_destination = True
except HttpResponseException as e:
if not e.code == 400:
raise
# We are probably hitting an old server that doesn't support
# get_missing_events
signed_events = []
have_gotten_all_from_destination = False
if len(signed_events) >= limit:
defer.returnValue(signed_events)
servers = yield self.store.get_joined_hosts_for_room(room_id)
servers = set(servers)
servers.discard(self.server_name)
failed_to_fetch = set()
while len(signed_events) < limit:
# Are we missing any?
seen_events = set(earliest_events_ids)
seen_events.update(e.event_id for e in signed_events if e)
missing_events = {}
for e in itertools.chain(latest_events, signed_events):
if e.depth > min_depth:
missing_events.update({
e_id: e.depth for e_id, _ in e.prev_events
if e_id not in seen_events
and e_id not in failed_to_fetch
})
if not missing_events:
break
have_seen = yield self.store.have_events(missing_events)
for k in have_seen:
missing_events.pop(k, None)
if not missing_events:
break
# Okay, we haven't gotten everything yet. Lets get them.
ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])
if have_gotten_all_from_destination:
servers.discard(destination)
def random_server_list():
srvs = list(servers)
random.shuffle(srvs)
return srvs
deferreds = [
self.get_pdu(
destinations=random_server_list(),
event_id=e_id,
)
for e_id, depth in ordered_missing[:limit - len(signed_events)]
]
res = yield defer.DeferredList(deferreds, consumeErrors=True)
for (result, val), (e_id, _) in zip(res, ordered_missing):
if result and val:
signed_events.append(val)
else:
failed_to_fetch.add(e_id)
defer.returnValue(signed_events)
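Each pass of the loop recomputes the "frontier": prev_event ids referenced above min_depth that are neither already seen nor previously failed. The frontier computation on plain dicts (the event shape here is invented for illustration):

def missing_frontier(events, seen, failed, min_depth):
    # events: dicts with "depth" and "prev_events" (a list of event ids).
    missing = {}
    for e in events:
        if e["depth"] > min_depth:
            for e_id in e["prev_events"]:
                if e_id not in seen and e_id not in failed:
                    missing[e_id] = e["depth"]
    return missing

print(missing_frontier(
    [{"depth": 5, "prev_events": ["$a", "$b"]}],
    seen={"$a"}, failed=set(), min_depth=3,
))  # {'$b': 5}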
def event_from_pdu_json(self, pdu_json, outlier=False):
event = FrozenEvent(
pdu_json


@@ -20,18 +20,28 @@ from .federation_base import FederationBase
from .units import Transaction, Edu
from synapse.util.logutils import log_function
from synapse.util.logcontext import PreserveLoggingContext
from synapse.events import FrozenEvent
import synapse.metrics
from synapse.api.errors import FederationError, SynapseError
from synapse.crypto.event_signing import compute_event_signature
import simplejson as json
import logging
logger = logging.getLogger(__name__)
# synapse.federation.federation_server is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.server")
received_pdus_counter = metrics.register_counter("received_pdus")
received_edus_counter = metrics.register_counter("received_edus")
received_queries_counter = metrics.register_counter("received_queries", labels=["type"])
class FederationServer(FederationBase):
def set_handler(self, handler):
@@ -84,6 +94,8 @@ class FederationServer(FederationBase):
def on_incoming_transaction(self, transaction_data):
transaction = Transaction(**transaction_data)
received_pdus_counter.inc_by(len(transaction.pdus))
for p in transaction.pdus:
if "unsigned" in p:
unsigned = p["unsigned"]
@@ -111,18 +123,20 @@ class FederationServer(FederationBase):
logger.debug("[%s] Transaction is new", transaction.transaction_id)
with PreserveLoggingContext():
dl = []
results = []
for pdu in pdu_list:
d = self._handle_new_pdu(transaction.origin, pdu)
def handle_failure(failure):
failure.trap(FederationError)
self.send_failure(failure.value, transaction.origin)
d.addErrback(handle_failure)
dl.append(d)
try:
yield d
results.append({})
except FederationError as e:
self.send_failure(e, transaction.origin)
results.append({"error": str(e)})
except Exception as e:
results.append({"error": str(e)})
logger.exception("Failed to handle PDU")
if hasattr(transaction, "edus"):
for edu in [Edu(**x) for x in transaction.edus]:
@@ -135,21 +149,11 @@ class FederationServer(FederationBase):
for failure in getattr(transaction, "pdu_failures", []):
logger.info("Got failure %r", failure)
results = yield defer.DeferredList(dl, consumeErrors=True)
ret = []
for r in results:
if r[0]:
ret.append({})
else:
logger.exception(r[1])
ret.append({"error": str(r[1].value)})
logger.debug("Returning: %s", str(ret))
logger.debug("Returning: %s", str(results))
response = {
"pdus": dict(zip(
(p.event_id for p in pdu_list), ret
(p.event_id for p in pdu_list), results
)),
}
@@ -160,6 +164,8 @@ class FederationServer(FederationBase):
defer.returnValue((200, response))
def received_edu(self, origin, edu_type, content):
received_edus_counter.inc()
if edu_type in self.edu_handlers:
self.edu_handlers[edu_type](origin, content)
else:
@@ -211,6 +217,8 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.inc(query_type)
if query_type in self.query_handlers:
response = yield self.query_handlers[query_type](args)
defer.returnValue((200, response))
@@ -305,6 +313,62 @@ class FederationServer(FederationBase):
(200, send_content)
)
@defer.inlineCallbacks
@log_function
def on_query_client_keys(self, origin, content):
query = []
for user_id, device_ids in content.get("device_keys", {}).items():
if not device_ids:
query.append((user_id, None))
else:
for device_id in device_ids:
query.append((user_id, device_id))
results = yield self.store.get_e2e_device_keys(query)
json_result = {}
for user_id, device_keys in results.items():
for device_id, json_bytes in device_keys.items():
json_result.setdefault(user_id, {})[device_id] = json.loads(
json_bytes
)
defer.returnValue({"device_keys": json_result})
@defer.inlineCallbacks
@log_function
def on_claim_client_keys(self, origin, content):
query = []
for user_id, device_keys in content.get("one_time_keys", {}).items():
for device_id, algorithm in device_keys.items():
query.append((user_id, device_id, algorithm))
results = yield self.store.claim_e2e_one_time_keys(query)
json_result = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_bytes in keys.items():
json_result.setdefault(user_id, {})[device_id] = {
key_id: json.loads(json_bytes)
}
defer.returnValue({"one_time_keys": json_result})
@defer.inlineCallbacks
@log_function
def on_get_missing_events(self, origin, room_id, earliest_events,
latest_events, limit, min_depth):
missing_events = yield self.handler.on_get_missing_events(
origin, room_id, earliest_events, latest_events, limit, min_depth
)
time_now = self._clock.time_msec()
defer.returnValue({
"events": [ev.get_pdu_json(time_now) for ev in missing_events],
})
@log_function
def _get_persisted_pdu(self, origin, event_id, do_auth=True):
""" Get a PDU from the database with given origin and id.
@@ -331,7 +395,7 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
@log_function
def _handle_new_pdu(self, origin, pdu, max_recursion=10):
def _handle_new_pdu(self, origin, pdu, get_missing=True):
# We reprocess pdus when we have seen them only as outliers
existing = yield self._get_persisted_pdu(
origin, pdu.event_id, do_auth=False
@@ -383,48 +447,54 @@ class FederationServer(FederationBase):
pdu.room_id, min_depth
)
prevs = {e_id for e_id, _ in pdu.prev_events}
seen = set(have_seen.keys())
if min_depth and pdu.depth < min_depth:
# This is so that we don't notify the user about this
# message, to work around the fact that some events will
# reference really really old events we really don't want to
# send to the clients.
pdu.internal_metadata.outlier = True
elif min_depth and pdu.depth > min_depth and max_recursion > 0:
for event_id, hashes in pdu.prev_events:
if event_id not in have_seen:
logger.debug(
"_handle_new_pdu requesting pdu %s",
event_id
elif min_depth and pdu.depth > min_depth:
if get_missing and prevs - seen:
latest = yield self.store.get_latest_event_ids_in_room(
pdu.room_id
)
try:
new_pdu = yield self.federation_client.get_pdu(
[origin, pdu.origin],
event_id=event_id,
# We add the prev events that we have seen to the latest
# list to ensure the remote server doesn't give them to us
latest = set(latest)
latest |= seen
missing_events = yield self.get_missing_events(
origin,
pdu.room_id,
earliest_events_ids=list(latest),
latest_events=[pdu],
limit=10,
min_depth=min_depth,
)
if new_pdu:
# We want to sort these by depth so we process them and
# tell clients about them in order.
missing_events.sort(key=lambda x: x.depth)
for e in missing_events:
yield self._handle_new_pdu(
origin,
new_pdu,
max_recursion=max_recursion-1
e,
get_missing=False
)
have_seen = yield self.store.have_events(
[ev for ev, _ in pdu.prev_events]
)
logger.debug("Processed pdu %s", event_id)
else:
logger.warn("Failed to get PDU %s", event_id)
fetch_state = True
except:
# TODO(erikj): Do some more intelligent retries.
logger.exception("Failed to get PDU")
fetch_state = True
else:
prevs = {e_id for e_id, _ in pdu.prev_events}
seen = set(have_seen.keys())
if prevs - seen:
fetch_state = True
else:
fetch_state = True
if fetch_state:
# We need to get the state at this event, since we haven't


@@ -23,8 +23,6 @@ from twisted.internet import defer
from synapse.util.logutils import log_function
from syutil.jsonutil import encode_canonical_json
import logging
@@ -71,7 +69,7 @@ class TransactionActions(object):
transaction.transaction_id,
transaction.origin,
code,
encode_canonical_json(response)
response,
)
@defer.inlineCallbacks
@@ -101,5 +99,5 @@ class TransactionActions(object):
transaction.transaction_id,
transaction.destination,
response_code,
encode_canonical_json(response_dict)
response_dict,
)


@@ -25,12 +25,15 @@ from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.retryutils import (
get_retry_limiter, NotRetryingDestination,
)
import synapse.metrics
import logging
logger = logging.getLogger(__name__)
metrics = synapse.metrics.get_metrics_for(__name__)
class TransactionQueue(object):
"""This class makes sure we only have one transaction in flight at
@@ -54,11 +57,25 @@ class TransactionQueue(object):
# done
self.pending_transactions = {}
metrics.register_callback(
"pending_destinations",
lambda: len(self.pending_transactions),
)
# Is a mapping from destination -> list of
# tuple(pending pdus, deferred, order)
self.pending_pdus_by_dest = {}
self.pending_pdus_by_dest = pdus = {}
# destination -> list of tuple(edu, deferred)
self.pending_edus_by_dest = {}
self.pending_edus_by_dest = edus = {}
metrics.register_callback(
"pending_pdus",
lambda: sum(map(len, pdus.values())),
)
metrics.register_callback(
"pending_edus",
lambda: sum(map(len, edus.values())),
)
# destination -> list of tuple(failure, deferred)
self.pending_failures_by_dest = {}
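The local aliases (pdus, edus) exist so the metric lambdas close over the live dicts. A self-contained illustration with a stand-in registry (the real synapse.metrics API differs):

class ToyRegistry(object):
    def __init__(self):
        self.callbacks = {}

    def register_callback(self, name, fn):
        self.callbacks[name] = fn

    def poll(self, name):
        return self.callbacks[name]()

pending_pdus = {}  # destination -> list of queued PDUs
registry = ToyRegistry()
registry.register_callback(
    "pending_pdus", lambda: sum(map(len, pending_pdus.values()))
)

pending_pdus["remote.example.com"] = ["pdu1", "pdu2"]
print(registry.poll("pending_pdus"))  # 2 -- the lambda reads the live dict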
@@ -87,7 +104,6 @@ class TransactionQueue(object):
return not destination.startswith("localhost")
@defer.inlineCallbacks
@log_function
def enqueue_pdu(self, pdu, destinations, order):
# We loop through all destinations to see whether we already have
# a transaction in progress. If we do, stick it in the pending_pdus
@@ -115,8 +131,8 @@ class TransactionQueue(object):
if not deferred.called:
deferred.errback(failure)
def log_failure(failure):
logger.warn("Failed to send pdu", failure.value)
def log_failure(f):
logger.warn("Failed to send pdu to %s: %s", destination, f.value)
deferred.addErrback(log_failure)
@@ -143,8 +159,8 @@ class TransactionQueue(object):
if not deferred.called:
deferred.errback(failure)
def log_failure(failure):
logger.warn("Failed to send pdu", failure.value)
def log_failure(f):
logger.warn("Failed to send edu to %s: %s", destination, f.value)
deferred.addErrback(log_failure)
@@ -174,7 +190,7 @@ class TransactionQueue(object):
deferred.errback(f)
def log_failure(f):
logger.warn("Failed to send pdu", f.value)
logger.warn("Failed to send failure to %s: %s", destination, f.value)
deferred.addErrback(log_failure)
@@ -191,13 +207,13 @@ class TransactionQueue(object):
# request at which point pending_pdus_by_dest just keeps growing.
# we need application-layer timeouts of some flavour of these
# requests
logger.info(
logger.debug(
"TX [%s] Transaction already in progress",
destination
)
return
logger.info("TX [%s] _attempt_new_transaction", destination)
logger.debug("TX [%s] _attempt_new_transaction", destination)
# list of (pending_pdu, deferred, order)
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
@@ -205,11 +221,11 @@ class TransactionQueue(object):
pending_failures = self.pending_failures_by_dest.pop(destination, [])
if pending_pdus:
logger.info("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
destination, len(pending_pdus))
if not pending_pdus and not pending_edus and not pending_failures:
logger.info("TX [%s] Nothing to send", destination)
logger.debug("TX [%s] Nothing to send", destination)
return
# Sort based on the order field
@@ -224,6 +240,10 @@ class TransactionQueue(object):
]
try:
self.pending_transactions[destination] = 1
txn_id = str(self._next_txn_id)
limiter = yield get_retry_limiter(
destination,
self._clock,
@@ -231,21 +251,19 @@ class TransactionQueue(object):
)
logger.debug(
"TX [%s] Attempting new transaction"
"TX [%s] {%s} Attempting new transaction"
" (pdus: %d, edus: %d, failures: %d)",
destination,
destination, txn_id,
len(pending_pdus),
len(pending_edus),
len(pending_failures)
)
self.pending_transactions[destination] = 1
logger.debug("TX [%s] Persisting transaction...", destination)
transaction = Transaction.create_new(
origin_server_ts=int(self._clock.time_msec()),
transaction_id=str(self._next_txn_id),
transaction_id=txn_id,
origin=self.server_name,
destination=destination,
pdus=pdus,
@@ -259,9 +277,13 @@ class TransactionQueue(object):
logger.debug("TX [%s] Persisted transaction", destination)
logger.info(
"TX [%s] Sending transaction [%s]",
destination,
"TX [%s] {%s} Sending transaction [%s],"
" (PDUs: %d, EDUs: %d, failures: %d)",
destination, txn_id,
transaction.transaction_id,
len(pending_pdus),
len(pending_edus),
len(pending_failures),
)
with limiter:
@@ -287,7 +309,7 @@ class TransactionQueue(object):
code = 200
if response:
for e_id, r in getattr(response, "pdus", {}).items():
for e_id, r in response.get("pdus", {}).items():
if "error" in r:
logger.warn(
"Transaction returned error for %s: %s",
@@ -297,7 +319,10 @@ class TransactionQueue(object):
code = e.code
response = e.response
logger.info("TX [%s] got %d response", destination, code)
logger.info(
"TX [%s] {%s} got %d response",
destination, txn_id, code
)
logger.debug("TX [%s] Sent transaction", destination)
logger.debug("TX [%s] Marking as delivered...", destination)


@@ -24,6 +24,8 @@ communicate over a different (albeit still reliable) protocol.
from .server import TransportLayerServer
from .client import TransportLayerClient
from synapse.util.ratelimitutils import FederationRateLimiter
class TransportLayer(TransportLayerServer, TransportLayerClient):
"""This is a basic implementation of the transport layer that translates
@@ -55,8 +57,18 @@ class TransportLayer(TransportLayerServer, TransportLayerClient):
send requests
"""
self.keyring = homeserver.get_keyring()
self.clock = homeserver.get_clock()
self.server_name = server_name
self.server = server
self.client = client
self.request_handler = None
self.received_handler = None
self.ratelimiter = FederationRateLimiter(
self.clock,
window_size=homeserver.config.federation_rc_window_size,
sleep_limit=homeserver.config.federation_rc_sleep_limit,
sleep_msec=homeserver.config.federation_rc_sleep_delay,
reject_limit=homeserver.config.federation_rc_reject_limit,
concurrent_requests=homeserver.config.federation_rc_concurrent,
)


@@ -50,13 +50,15 @@ class TransportLayerClient(object):
)
@log_function
def get_event(self, destination, event_id):
def get_event(self, destination, event_id, timeout=None):
""" Requests the pdu with give id and origin from the given server.
Args:
destination (str): The host name of the remote home server we want
to get the state from.
event_id (str): The id of the event being requested.
timeout (int): How long to try (in ms) the destination for before
giving up. None indicates no timeout.
Returns:
Deferred: Results in a dict received from the remote homeserver.
@@ -65,7 +67,7 @@ class TransportLayerClient(object):
destination, event_id)
path = PREFIX + "/event/%s/" % (event_id, )
return self.client.get_json(destination, path=path)
return self.client.get_json(destination, path=path, timeout=timeout)
@log_function
def backfill(self, destination, room_id, event_tuples, limit):
@@ -219,3 +221,92 @@ class TransportLayerClient(object):
)
defer.returnValue(content)
@defer.inlineCallbacks
@log_function
def query_client_keys(self, destination, query_content):
"""Query the device keys for a list of user ids hosted on a remote
server.
Request:
{
"device_keys": {
"<user_id>": ["<device_id>"]
} }
Response:
{
"device_keys": {
"<user_id>": {
"<device_id>": {...}
} } }
Args:
destination(str): The server to query.
query_content(dict): The user ids to query.
Returns:
A dict containing the device keys.
"""
path = PREFIX + "/user/keys/query"
content = yield self.client.post_json(
destination=destination,
path=path,
data=query_content,
)
defer.returnValue(content)
@defer.inlineCallbacks
@log_function
def claim_client_keys(self, destination, query_content):
"""Claim one-time keys for a list of devices hosted on a remote server.
Request:
{
"one_time_keys": {
"<user_id>": {
"<device_id>": "<algorithm>"
} } }
Response:
{
"device_keys": {
"<user_id>": {
"<device_id>": {
"<algorithm>:<key_id>": "<key_base64>"
} } } }
Args:
destination(str): The server to query.
query_content(dict): The user ids to query.
Returns:
A dict containing the one-time keys.
"""
path = PREFIX + "/user/keys/claim"
content = yield self.client.post_json(
destination=destination,
path=path,
data=query_content,
)
defer.returnValue(content)
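For reference, a concrete request body matching the documented claim shape (user and device ids invented):

claim_body = {
    "one_time_keys": {
        "@bob:remote.example.com": {"DEVICE1": "signed_curve25519"}
    }
}
# content = yield self.client.post_json(destination, path, data=claim_body)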
@defer.inlineCallbacks
@log_function
def get_missing_events(self, destination, room_id, earliest_events,
latest_events, limit, min_depth):
path = PREFIX + "/get_missing_events/%s" % (room_id,)
content = yield self.client.post_json(
destination=destination,
path=path,
data={
"limit": int(limit),
"min_depth": int(min_depth),
"earliest_events": earliest_events,
"latest_events": latest_events,
}
)
defer.returnValue(content)


@@ -19,6 +19,7 @@ from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError
from synapse.util.logutils import log_function
import functools
import logging
import simplejson as json
import re
@@ -30,8 +31,9 @@ logger = logging.getLogger(__name__)
class TransportLayerServer(object):
"""Handles incoming federation HTTP requests"""
# A method just so we can pass 'self' as the authenticator to the Servlets
@defer.inlineCallbacks
def _authenticate_request(self, request):
def authenticate_request(self, request):
json_request = {
"method": request.method,
"uri": request.uri,
@@ -91,21 +93,10 @@ class TransportLayerServer(object):
yield self.keyring.verify_json_for_server(origin, json_request)
defer.returnValue((origin, content))
logger.info("Request from %s", origin)
request.authenticated_entity = origin
def _with_authentication(self, handler):
@defer.inlineCallbacks
def new_handler(request, *args, **kwargs):
try:
(origin, content) = yield self._authenticate_request(request)
response = yield handler(
origin, content, request.args, *args, **kwargs
)
except:
logger.exception("_authenticate_request failed")
raise
defer.returnValue(response)
return new_handler
defer.returnValue((origin, content))
@log_function
def register_received_handler(self, handler):
@@ -114,14 +105,12 @@ class TransportLayerServer(object):
Args:
handler (TransportReceivedHandler)
"""
self.received_handler = handler
# This is when someone is trying to send us a bunch of data.
self.server.register_path(
"PUT",
re.compile("^" + PREFIX + "/send/([^/]*)/$"),
self._with_authentication(self._on_send_request)
)
FederationSendServlet(
handler,
authenticator=self,
ratelimiter=self.ratelimiter,
server_name=self.server_name,
).register(self.server)
@log_function
def register_request_handler(self, handler):
@@ -130,124 +119,65 @@ class TransportLayerServer(object):
Args:
handler (TransportRequestHandler)
"""
self.request_handler = handler
for servletclass in SERVLET_CLASSES:
servletclass(
handler,
authenticator=self,
ratelimiter=self.ratelimiter,
).register(self.server)
# This is for when someone asks us for everything since version X
self.server.register_path(
"GET",
re.compile("^" + PREFIX + "/pull/$"),
self._with_authentication(
lambda origin, content, query:
handler.on_pull_request(query["origin"][0], query["v"])
)
)
# This is when someone asks for a data item for a given server
# data_id pair.
self.server.register_path(
"GET",
re.compile("^" + PREFIX + "/event/([^/]*)/$"),
self._with_authentication(
lambda origin, content, query, event_id:
handler.on_pdu_request(origin, event_id)
)
)
class BaseFederationServlet(object):
def __init__(self, handler, authenticator, ratelimiter):
self.handler = handler
self.authenticator = authenticator
self.ratelimiter = ratelimiter
# This is when someone asks for all data for a given context.
self.server.register_path(
"GET",
re.compile("^" + PREFIX + "/state/([^/]*)/$"),
self._with_authentication(
lambda origin, content, query, context:
handler.on_context_state_request(
origin,
context,
query.get("event_id", [None])[0],
)
)
)
self.server.register_path(
"GET",
re.compile("^" + PREFIX + "/backfill/([^/]*)/$"),
self._with_authentication(
lambda origin, content, query, context:
self._on_backfill_request(
origin, context, query["v"], query["limit"]
)
)
)
# This is when we receive a server-server Query
self.server.register_path(
"GET",
re.compile("^" + PREFIX + "/query/([^/]*)$"),
self._with_authentication(
lambda origin, content, query, query_type:
handler.on_query_request(
query_type,
{k: v[0].decode("utf-8") for k, v in query.items()}
)
)
)
self.server.register_path(
"GET",
re.compile("^" + PREFIX + "/make_join/([^/]*)/([^/]*)$"),
self._with_authentication(
lambda origin, content, query, context, user_id:
self._on_make_join_request(
origin, content, query, context, user_id
)
)
)
self.server.register_path(
"GET",
re.compile("^" + PREFIX + "/event_auth/([^/]*)/([^/]*)$"),
self._with_authentication(
lambda origin, content, query, context, event_id:
handler.on_event_auth(
origin, context, event_id,
)
)
)
self.server.register_path(
"PUT",
re.compile("^" + PREFIX + "/send_join/([^/]*)/([^/]*)$"),
self._with_authentication(
lambda origin, content, query, context, event_id:
self._on_send_join_request(
origin, content, query,
)
)
)
self.server.register_path(
"PUT",
re.compile("^" + PREFIX + "/invite/([^/]*)/([^/]*)$"),
self._with_authentication(
lambda origin, content, query, context, event_id:
self._on_invite_request(
origin, content, query,
)
)
)
self.server.register_path(
"POST",
re.compile("^" + PREFIX + "/query_auth/([^/]*)/([^/]*)$"),
self._with_authentication(
lambda origin, content, query, context, event_id:
self._on_query_auth_request(
origin, content, event_id,
)
)
)
def _wrap(self, code):
authenticator = self.authenticator
ratelimiter = self.ratelimiter
@defer.inlineCallbacks
@log_function
def _on_send_request(self, origin, content, query, transaction_id):
@functools.wraps(code)
def new_code(request, *args, **kwargs):
try:
(origin, content) = yield authenticator.authenticate_request(request)
with ratelimiter.ratelimit(origin) as d:
yield d
response = yield code(
origin, content, request.args, *args, **kwargs
)
except:
logger.exception("authenticate_request failed")
raise
defer.returnValue(response)
# Extra logic that functools.wraps() doesn't finish
new_code.__self__ = code.__self__
return new_code
def register(self, server):
pattern = re.compile("^" + PREFIX + self.PATH + "$")
for method in ("GET", "PUT", "POST"):
code = getattr(self, "on_%s" % (method), None)
if code is None:
continue
server.register_path(method, pattern, self._wrap(code))
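register() is the heart of the refactor: defining on_GET/on_PUT/on_POST on a servlet subclass is all it takes to create a route. A condensed standalone version (the server object is faked and the auth/ratelimit wrapper omitted):

import re

class FakeServer(object):
    def __init__(self):
        self.routes = []

    def register_path(self, method, pattern, handler):
        self.routes.append((method, pattern.pattern, handler))

class BaseServlet(object):
    PREFIX = "/_matrix/federation/v1"

    def register(self, server):
        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")
        for method in ("GET", "PUT", "POST"):
            code = getattr(self, "on_%s" % (method,), None)
            if code is None:
                continue  # subclass doesn't handle this verb
            server.register_path(method, pattern, code)

class EventServlet(BaseServlet):
    PATH = "/event/([^/]*)/"

    def on_GET(self, origin, content, query, event_id):
        return {"event_id": event_id}

server = FakeServer()
EventServlet().register(server)
print(server.routes)  # one GET route for /event/<id>/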
class FederationSendServlet(BaseFederationServlet):
PATH = "/send/([^/]*)/"
def __init__(self, handler, server_name, **kwargs):
super(FederationSendServlet, self).__init__(handler, **kwargs)
self.server_name = server_name
# This is when someone is trying to send us a bunch of data.
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, transaction_id):
""" Called on PUT /send/<transaction_id>/
Args:
@@ -269,6 +199,14 @@ class TransportLayerServer(object):
transaction_id, str(transaction_data)
)
logger.info(
"Received txn %s from %s. (PDUs: %d, EDUs: %d, failures: %d)",
transaction_id, origin,
len(transaction_data.get("pdus", [])),
len(transaction_data.get("edus", [])),
len(transaction_data.get("failures", [])),
)
# We should ideally be getting this from the security layer.
# origin = body["origin"]
@@ -285,8 +223,7 @@ class TransportLayerServer(object):
return
try:
code, response = yield self.handler.on_incoming_transaction(
transaction_data
)
except:
@@ -295,52 +232,165 @@ class TransportLayerServer(object):
defer.returnValue((code, response))
class FederationPullServlet(BaseFederationServlet):
PATH = "/pull/"
# This is for when someone asks us for everything since version X
def on_GET(self, origin, content, query):
return self.handler.on_pull_request(query["origin"][0], query["v"])
class FederationEventServlet(BaseFederationServlet):
PATH = "/event/([^/]*)/"
# This is when someone asks for a data item for a given server data_id pair.
def on_GET(self, origin, content, query, event_id):
return self.handler.on_pdu_request(origin, event_id)
class FederationStateServlet(BaseFederationServlet):
PATH = "/state/([^/]*)/"
# This is when someone asks for all data for a given context.
def on_GET(self, origin, content, query, context):
return self.handler.on_context_state_request(
origin,
context,
query.get("event_id", [None])[0],
)
class FederationBackfillServlet(BaseFederationServlet):
PATH = "/backfill/([^/]*)/"
def on_GET(self, origin, content, query, context):
versions = query["v"]
limits = query["limit"]
if not limits:
return defer.succeed((400, {"error": "Did not include limit param"}))
limit = int(limits[-1])
return self.handler.on_backfill_request(origin, context, versions, limit)
class FederationQueryServlet(BaseFederationServlet):
PATH = "/query/([^/]*)"
# This is when we receive a server-server Query
def on_GET(self, origin, content, query, query_type):
return self.handler.on_query_request(
query_type,
{k: v[0].decode("utf-8") for k, v in query.items()}
)
class FederationMakeJoinServlet(BaseFederationServlet):
PATH = "/make_join/([^/]*)/([^/]*)"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, context, user_id):
content = yield self.handler.on_make_join_request(context, user_id)
defer.returnValue((200, content))
class FederationEventAuthServlet(BaseFederationServlet):
PATH = "/event_auth/([^/]*)/([^/]*)"
def on_GET(self, origin, content, query, context, event_id):
return self.handler.on_event_auth(origin, context, event_id)
class FederationSendJoinServlet(BaseFederationServlet):
PATH = "/send_join/([^/]*)/([^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, context, event_id):
# TODO(paul): assert that context/event_id parsed from path actually
# match those given in content
content = yield self.handler.on_send_join_request(origin, content)
defer.returnValue((200, content))
class FederationInviteServlet(BaseFederationServlet):
PATH = "/invite/([^/]*)/([^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, context, event_id):
# TODO(paul): assert that context/event_id parsed from path actually
# match those given in content
content = yield self.handler.on_invite_request(origin, content)
defer.returnValue((200, content))
class FederationClientKeysQueryServlet(BaseFederationServlet):
PATH = "/user/keys/query"
@defer.inlineCallbacks
def on_POST(self, origin, content, query):
response = yield self.handler.on_query_client_keys(origin, content)
defer.returnValue((200, response))
class FederationClientKeysClaimServlet(BaseFederationServlet):
PATH = "/user/keys/claim"
@defer.inlineCallbacks
def on_POST(self, origin, content, query):
response = yield self.handler.on_claim_client_keys(origin, content)
defer.returnValue((200, response))
class FederationQueryAuthServlet(BaseFederationServlet):
PATH = "/query_auth/([^/]*)/([^/]*)"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, context, event_id):
new_content = yield self.handler.on_query_auth_request(
origin, content, event_id
)
defer.returnValue((200, new_content))
class FederationGetMissingEventsServlet(BaseFederationServlet):
# TODO(paul): Why does this path alone end with "/?" optional?
PATH = "/get_missing_events/([^/]*)/?"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, room_id):
limit = int(content.get("limit", 10))
min_depth = int(content.get("min_depth", 0))
earliest_events = content.get("earliest_events", [])
latest_events = content.get("latest_events", [])
content = yield self.handler.on_get_missing_events(
origin,
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
min_depth=min_depth,
limit=limit,
)
defer.returnValue((200, content))
SERVLET_CLASSES = (
FederationPullServlet,
FederationEventServlet,
FederationStateServlet,
FederationBackfillServlet,
FederationQueryServlet,
FederationMakeJoinServlet,
FederationEventServlet,
FederationSendJoinServlet,
FederationInviteServlet,
FederationQueryAuthServlet,
FederationGetMissingEventsServlet,
FederationEventAuthServlet,
FederationClientKeysQueryServlet,
FederationClientKeysClaimServlet,
)
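A minimal sketch of how these servlet classes might be wired up (the register_servlets helper here is an assumption for illustration, not part of this diff): each class is constructed with the shared handler, authenticator and ratelimiter, then registers its own paths:

def register_servlets(handler, server, authenticator, ratelimiter):
    # Hypothetical wiring: every servlet attaches its on_GET/on_PUT/
    # on_POST methods to the HTTP server under PREFIX + PATH.
    for servletclass in SERVLET_CLASSES:
        servletclass(
            handler=handler,
            authenticator=authenticator,
            ratelimiter=ratelimiter,
        ).register(server)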


@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.appservice.scheduler import AppServiceScheduler
from synapse.appservice.api import ApplicationServiceApi
from .register import RegistrationHandler
from .room import (
@@ -21,7 +22,6 @@ from .room import (
from .message import MessageHandler
from .events import EventStreamHandler, EventHandler
from .federation import FederationHandler
from .login import LoginHandler
from .profile import ProfileHandler
from .presence import PresenceHandler
from .directory import DirectoryHandler
@@ -29,6 +29,9 @@ from .typing import TypingNotificationHandler
from .admin import AdminHandler
from .appservice import ApplicationServicesHandler
from .sync import SyncHandler
from .auth import AuthHandler
from .identity import IdentityHandler
from .receipts import ReceiptsHandler
class Handlers(object):
@@ -50,11 +53,18 @@ class Handlers(object):
self.profile_handler = ProfileHandler(hs)
self.presence_handler = PresenceHandler(hs)
self.room_list_handler = RoomListHandler(hs)
self.login_handler = LoginHandler(hs)
self.directory_handler = DirectoryHandler(hs)
self.typing_notification_handler = TypingNotificationHandler(hs)
self.admin_handler = AdminHandler(hs)
self.receipts_handler = ReceiptsHandler(hs)
asapi = ApplicationServiceApi(hs)
self.appservice_handler = ApplicationServicesHandler(
hs, asapi, AppServiceScheduler(
clock=hs.get_clock(),
store=hs.get_datastore(),
as_api=asapi
)
)
self.sync_handler = SyncHandler(hs)
self.auth_handler = AuthHandler(hs)
self.identity_handler = IdentityHandler(hs)


@@ -16,11 +16,12 @@
from twisted.internet import defer
from synapse.api.errors import LimitExceededError, SynapseError
from synapse.util.async import run_on_reactor
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.api.constants import Membership, EventTypes
from synapse.types import UserID
from synapse.util.logcontext import PreserveLoggingContext
import logging
@@ -58,8 +59,6 @@ class BaseHandler(object):
@defer.inlineCallbacks
def _create_new_client_event(self, builder):
yield run_on_reactor()
latest_ret = yield self.store.get_latest_events_in_room(
builder.room_id,
)
@@ -79,7 +78,9 @@ class BaseHandler(object):
context = yield state_handler.compute_event_context(builder)
if builder.is_state():
builder.prev_state = yield self.store.add_event_hashes(
context.prev_state_events
)
yield self.auth.add_auth_events(builder, context)
@@ -90,8 +91,8 @@ class BaseHandler(object):
event = builder.build()
logger.debug(
"Created event %s with auth_events: %s, current state: %s",
event.event_id, context.auth_events, context.current_state,
"Created event %s with current state: %s",
event.event_id, context.current_state,
)
defer.returnValue(
@@ -101,14 +102,14 @@ class BaseHandler(object):
@defer.inlineCallbacks
def handle_new_client_event(self, event, context, extra_destinations=[],
extra_users=[], suppress_auth=False):
yield run_on_reactor()
# We now need to go and hit out to wherever we need to hit out to.
if not suppress_auth:
self.auth.check(event, auth_events=context.current_state)
(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
)
federation_handler = self.hs.get_handlers().federation_handler
@@ -142,8 +143,21 @@ class BaseHandler(object):
"Failed to get destination from event %s", s.event_id
)
with PreserveLoggingContext():
# Don't block waiting on waking up all the listeners.
notify_d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
notify_d.addErrback(log_failure)
federation_handler.handle_new_event(
event, destinations=destinations,
)


@@ -15,58 +15,37 @@
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.api.constants import EventTypes
from synapse.appservice import ApplicationService
from synapse.types import UserID
import synapse.util.stringutils as stringutils
import logging
logger = logging.getLogger(__name__)
def log_failure(failure):
logger.error(
"Application Services Failure",
exc_info=(
failure.type,
failure.value,
failure.getTracebackObject()
)
)
# NB: Purposefully not inheriting BaseHandler since that contains way too much
# setup code which this handler does not need or use. This makes testing a lot
# easier.
class ApplicationServicesHandler(object):
def __init__(self, hs, appservice_api):
def __init__(self, hs, appservice_api, appservice_scheduler):
self.store = hs.get_datastore()
self.hs = hs
self.appservice_api = appservice_api
@defer.inlineCallbacks
def register(self, app_service):
logger.info("Register -> %s", app_service)
# check the token is recognised
try:
stored_service = yield self.store.get_app_service_by_token(
app_service.token
)
if not stored_service:
raise StoreError(404, "Application service not found")
except StoreError:
raise SynapseError(
403, "Unrecognised application services token. "
"Consult the home server admin.",
errcode=Codes.FORBIDDEN
)
app_service.hs_token = self._generate_hs_token()
# create a sender for this application service which is used when
# creating rooms, etc..
account = yield self.hs.get_handlers().registration_handler.register()
app_service.sender = account[0]
yield self.store.update_app_service(app_service)
defer.returnValue(app_service)
@defer.inlineCallbacks
def unregister(self, token):
logger.info("Unregister as_token=%s", token)
yield self.store.unregister_app_service(token)
self.scheduler = appservice_scheduler
self.started_scheduler = False
@defer.inlineCallbacks
def notify_interested_services(self, event):
@@ -90,9 +69,13 @@ class ApplicationServicesHandler(object):
if event.type == EventTypes.Member:
yield self._check_user_exists(event.state_key)
if not self.started_scheduler:
self.scheduler.start().addErrback(log_failure)
self.started_scheduler = True
# Fork off pushes to these services
for service in services:
self.scheduler.submit_event_for_as(service, event)
@defer.inlineCallbacks
def query_user_exists(self, user_id):
@@ -164,10 +147,7 @@ class ApplicationServicesHandler(object):
)
# We need to know the members associated with this event.room_id,
# if any.
member_list = yield self.store.get_room_members(
room_id=event.room_id,
membership=Membership.JOIN
)
member_list = yield self.store.get_users_in_room(event.room_id)
services = yield self.store.get_app_services()
interested_list = [
@@ -197,7 +177,14 @@ class ApplicationServicesHandler(object):
return
user_info = yield self.store.get_user_by_id(user_id)
if user_info:
defer.returnValue(False)
return
# user not found; could be the AS though, so check.
services = yield self.store.get_app_services()
service_list = [s for s in services if s.sender == user_id]
defer.returnValue(len(service_list) == 0)
@defer.inlineCallbacks
def _check_user_exists(self, user_id):
@@ -206,6 +193,3 @@ class ApplicationServicesHandler(object):
exists = yield self.query_user_exists(user_id)
defer.returnValue(exists)
defer.returnValue(True)
def _generate_hs_token(self):
return stringutils.random_string(24)

synapse/handlers/auth.py (new file, 330 lines)

@@ -0,0 +1,330 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.constants import LoginType
from synapse.types import UserID
from synapse.api.errors import LoginError, Codes
from synapse.http.client import SimpleHttpClient
from synapse.util.async import run_on_reactor
from twisted.web.client import PartialDownloadError
import logging
import bcrypt
import simplejson
import synapse.util.stringutils as stringutils
logger = logging.getLogger(__name__)
class AuthHandler(BaseHandler):
def __init__(self, hs):
super(AuthHandler, self).__init__(hs)
self.checkers = {
LoginType.PASSWORD: self._check_password_auth,
LoginType.RECAPTCHA: self._check_recaptcha,
LoginType.EMAIL_IDENTITY: self._check_email_identity,
LoginType.DUMMY: self._check_dummy_auth,
}
self.sessions = {}
@defer.inlineCallbacks
def check_auth(self, flows, clientdict, clientip):
"""
Takes a dictionary sent by the client in the login / registration
protocol and handles the login flow.
As a side effect, this function fills in the 'creds' key on the user's
session with a map, which maps each auth-type (str) to the relevant
identity authenticated by that auth-type (mostly str, but for captcha, bool).
Args:
flows (list): A list of login flows. Each flow is an ordered list of
strings representing auth-types. At least one full
flow must be completed in order for auth to be successful.
clientdict: The dictionary from the client root level, not the
'auth' key: this method prompts for auth if none is sent.
clientip (str): The IP address of the client.
Returns:
A tuple of (authed, dict, dict) where authed is true if the client
has successfully completed an auth flow. If it is true, the first
dict contains the authenticated credentials of each stage.
If authed is false, the first dictionary is the server response to
the login request and should be passed back to the client.
In either case, the second dict contains the parameters for this
request (which may have been given only in a previous call).
"""
authdict = None
sid = None
if clientdict and 'auth' in clientdict:
authdict = clientdict['auth']
del clientdict['auth']
if 'session' in authdict:
sid = authdict['session']
session = self._get_session_info(sid)
if len(clientdict) > 0:
# This was designed to allow the client to omit the parameters
# and just supply the session in subsequent calls, so that auth
# could be split between devices by sharing the session (e.g. so
# you could continue registration from your phone having clicked
# the email auth link on there). It's probably too open to abuse
# because it lets unauthenticated clients store arbitrary objects
# on a home server.
# Revisit: Assuming the REST APIs do sensible validation, the data
# isn't arbitrary.
session['clientdict'] = clientdict
self._save_session(session)
elif 'clientdict' in session:
clientdict = session['clientdict']
if not authdict:
defer.returnValue(
(False, self._auth_dict_for_flows(flows, session), clientdict)
)
if 'creds' not in session:
session['creds'] = {}
creds = session['creds']
# check auth type currently being presented
if 'type' in authdict:
if authdict['type'] not in self.checkers:
raise LoginError(400, "", Codes.UNRECOGNIZED)
result = yield self.checkers[authdict['type']](authdict, clientip)
if result:
creds[authdict['type']] = result
self._save_session(session)
for f in flows:
if len(set(f) - set(creds.keys())) == 0:
logger.info("Auth completed with creds: %r", creds)
self._remove_session(session)
defer.returnValue((True, creds, clientdict))
ret = self._auth_dict_for_flows(flows, session)
ret['completed'] = creds.keys()
defer.returnValue((False, ret, clientdict))
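To make the flow check above concrete — a hedged example with illustrative values: a flow counts as satisfied once every one of its stages appears in the session's creds.

flows = [
    [LoginType.RECAPTCHA, LoginType.EMAIL_IDENTITY],
    [LoginType.DUMMY],
]
creds = {LoginType.DUMMY: True}
# set(flows[1]) - set(creds.keys()) is empty, so the second flow is
# complete and check_auth returns (True, creds, clientdict).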
@defer.inlineCallbacks
def add_oob_auth(self, stagetype, authdict, clientip):
"""
Adds the result of out-of-band authentication into an existing auth
session. Currently used for adding the result of fallback auth.
"""
if stagetype not in self.checkers:
raise LoginError(400, "", Codes.MISSING_PARAM)
if 'session' not in authdict:
raise LoginError(400, "", Codes.MISSING_PARAM)
sess = self._get_session_info(
authdict['session']
)
if 'creds' not in sess:
sess['creds'] = {}
creds = sess['creds']
result = yield self.checkers[stagetype](authdict, clientip)
if result:
creds[stagetype] = result
self._save_session(sess)
defer.returnValue(True)
defer.returnValue(False)
@defer.inlineCallbacks
def _check_password_auth(self, authdict, _):
if "user" not in authdict or "password" not in authdict:
raise LoginError(400, "", Codes.MISSING_PARAM)
user_id = authdict["user"]
password = authdict["password"]
if not user_id.startswith('@'):
user_id = UserID.create(user_id, self.hs.hostname).to_string()
yield self._check_password(user_id, password)
defer.returnValue(user_id)
@defer.inlineCallbacks
def _check_recaptcha(self, authdict, clientip):
try:
user_response = authdict["response"]
except KeyError:
# Client tried to provide captcha but didn't give the parameter:
# bad request.
raise LoginError(
400, "Captcha response is required",
errcode=Codes.CAPTCHA_NEEDED
)
logger.info(
"Submitting recaptcha response %s with remoteip %s",
user_response, clientip
)
# TODO: get this from the homeserver rather than creating a new one for
# each request
try:
client = SimpleHttpClient(self.hs)
resp_body = yield client.post_urlencoded_get_json(
self.hs.config.recaptcha_siteverify_api,
args={
'secret': self.hs.config.recaptcha_private_key,
'response': user_response,
'remoteip': clientip,
}
)
except PartialDownloadError as pde:
# Twisted is silly
data = pde.response
resp_body = simplejson.loads(data)
if 'success' in resp_body and resp_body['success']:
defer.returnValue(True)
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
@defer.inlineCallbacks
def _check_email_identity(self, authdict, _):
yield run_on_reactor()
if 'threepid_creds' not in authdict:
raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
threepid_creds = authdict['threepid_creds']
identity_handler = self.hs.get_handlers().identity_handler
logger.info("Getting validated threepid. threepidcreds: %r", threepid_creds)
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
if not threepid:
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
threepid['threepid_creds'] = authdict['threepid_creds']
defer.returnValue(threepid)
@defer.inlineCallbacks
def _check_dummy_auth(self, authdict, _):
yield run_on_reactor()
defer.returnValue(True)
def _get_params_recaptcha(self):
return {"public_key": self.hs.config.recaptcha_public_key}
def _auth_dict_for_flows(self, flows, session):
public_flows = []
for f in flows:
public_flows.append(f)
get_params = {
LoginType.RECAPTCHA: self._get_params_recaptcha,
}
params = {}
for f in public_flows:
for stage in f:
if stage in get_params and stage not in params:
params[stage] = get_params[stage]()
return {
"session": session['id'],
"flows": [{"stages": f} for f in public_flows],
"params": params
}
def _get_session_info(self, session_id):
if session_id not in self.sessions:
session_id = None
if not session_id:
# create a new session
while session_id is None or session_id in self.sessions:
session_id = stringutils.random_string(24)
self.sessions[session_id] = {
"id": session_id,
}
return self.sessions[session_id]
@defer.inlineCallbacks
def login_with_password(self, user_id, password):
"""
Authenticates the user with their username and password.
Used only by the v1 login API.
Args:
user_id (str): User ID
password (str): Password
Returns:
The access token for the user's session.
Raises:
StoreError if there was a problem storing the token.
LoginError if there was an authentication problem.
"""
yield self._check_password(user_id, password)
reg_handler = self.hs.get_handlers().registration_handler
access_token = reg_handler.generate_token(user_id)
logger.info("Logging in user %s", user_id)
yield self.store.add_access_token_to_user(user_id, access_token)
defer.returnValue(access_token)
@defer.inlineCallbacks
def _check_password(self, user_id, password):
"""Checks that user_id has passed password, raises LoginError if not."""
user_info = yield self.store.get_user_by_id(user_id=user_id)
if not user_info:
logger.warn("Attempted to login as %s but they do not exist", user_id)
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
stored_hash = user_info["password_hash"]
if not bcrypt.checkpw(password, stored_hash):
logger.warn("Failed password login for user %s", user_id)
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
@defer.inlineCallbacks
def set_password(self, user_id, newpassword):
password_hash = bcrypt.hashpw(newpassword, bcrypt.gensalt())
yield self.store.user_set_password_hash(user_id, password_hash)
yield self.store.user_delete_access_tokens(user_id)
yield self.hs.get_pusherpool().remove_pushers_by_user(user_id)
yield self.store.flush_user(user_id)
@defer.inlineCallbacks
def add_threepid(self, user_id, medium, address, validated_at):
yield self.store.user_add_threepid(
user_id, medium, address, validated_at,
self.hs.get_clock().time_msec()
)
def _save_session(self, session):
# TODO: Persistent storage
logger.debug("Saving session %s", session)
self.sessions[session["id"]] = session
def _remove_session(self, session):
logger.debug("Removing session %s", session)
del self.sessions[session["id"]]


@@ -22,6 +22,7 @@ from synapse.api.constants import EventTypes
from synapse.types import RoomAlias
import logging
import string
logger = logging.getLogger(__name__)
@@ -40,6 +41,10 @@ class DirectoryHandler(BaseHandler):
def _create_association(self, room_alias, room_id, servers=None):
# general association creation for both human users and app services
for wchar in string.whitespace:
if wchar in room_alias.localpart:
raise SynapseError(400, "Invalid characters in room alias")
if not self.hs.is_mine(room_alias):
raise SynapseError(400, "Room alias must be local")
# TODO(erikj): Change this.
@@ -160,7 +165,7 @@ class DirectoryHandler(BaseHandler):
if not room_id:
raise SynapseError(
404,
"Room alias %r not found" % (room_alias.to_string(),),
"Room alias %s not found" % (room_alias.to_string(),),
Codes.NOT_FOUND
)
@@ -232,13 +237,23 @@ class DirectoryHandler(BaseHandler):
@defer.inlineCallbacks
def can_modify_alias(self, alias, user_id=None):
# Any application service "interested" in an alias they are regexing on
# can modify the alias.
# Users can only modify the alias if ALL the interested services have
# non-exclusive locks on the alias (or there are no interested services)
services = yield self.store.get_app_services()
interested_services = [
s for s in services if s.is_interested_in_alias(alias.to_string())
]
for service in interested_services:
if user_id == service.sender:
# this user IS the app service so they can do whatever they like
defer.returnValue(True)
return
elif service.is_exclusive_alias(alias.to_string()):
# another service has an exclusive lock on this alias.
defer.returnValue(False)
return
# either no interested services, or no service with an exclusive lock
defer.returnValue(True)
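Restating the rule above as a pure function — a hedged sketch, not code from this changeset:

def may_modify_alias(user_id, interested_services, alias):
    # `interested_services` are the app services whose regex matches
    # `alias`, as computed above.
    for service in interested_services:
        if user_id == service.sender:
            return True   # the AS itself may always modify its alias
        if service.is_exclusive_alias(alias):
            return False  # another service holds an exclusive lock
    return True           # no interested services, or none exclusive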


@@ -15,7 +15,6 @@
from twisted.internet import defer
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.types import UserID
from synapse.events.utils import serialize_event
@@ -23,6 +22,7 @@ from synapse.events.utils import serialize_event
from ._base import BaseHandler
import logging
import random
logger = logging.getLogger(__name__)
@@ -69,13 +69,17 @@ class EventStreamHandler(BaseHandler):
)
self._streams_per_user[auth_user] += 1
if pagin_config.from_token is None:
pagin_config.from_token = None
rm_handler = self.hs.get_handlers().room_member_handler
room_ids = yield rm_handler.get_joined_rooms_for_user(auth_user)
if timeout:
# If they've set a timeout set a minimum limit.
timeout = max(timeout, 500)
# Add some randomness to this value to try and mitigate against
# thundering herds on restart.
timeout = random.randint(int(timeout*0.9), int(timeout*1.1))
with PreserveLoggingContext():
events, tokens = yield self.notifier.get_events_for(
auth_user, room_ids, pagin_config, timeout
)


@@ -18,9 +18,11 @@
from ._base import BaseHandler
from synapse.api.errors import (
AuthError, FederationError, StoreError,
AuthError, FederationError, StoreError, CodeMessageException, SynapseError,
)
from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor
from synapse.util.frozenutils import unfreeze
@@ -29,6 +31,10 @@ from synapse.crypto.event_signing import (
)
from synapse.types import UserID
from synapse.events.utils import prune_event
from synapse.util.retryutils import NotRetryingDestination
from twisted.internet import defer
import itertools
@@ -73,8 +79,6 @@ class FederationHandler(BaseHandler):
# When joining a room we need to queue any events for that room up
self.room_queues = {}
@log_function
@defer.inlineCallbacks
def handle_new_event(self, event, destinations):
""" Takes in an event from the client to server side, that has already
been authed and handled by the state module, and sends it to any
@@ -89,9 +93,7 @@ class FederationHandler(BaseHandler):
processing.
"""
yield run_on_reactor()
self.replication_layer.send_pdu(event, destinations)
return self.replication_layer.send_pdu(event, destinations)
@log_function
@defer.inlineCallbacks
@@ -138,29 +140,32 @@ class FederationHandler(BaseHandler):
if state and auth_chain is not None:
# If we have any state or auth_chain given to us by the replication
# layer, then we should handle them (if we haven't before.)
event_infos = []
for e in itertools.chain(auth_chain, state):
if e.event_id in seen_ids:
continue
e.internal_metadata.outlier = True
auth_ids = [e_id for e_id, _ in e.auth_events]
auth = {
(e.type, e.state_key): e for e in auth_chain
if e.event_id in auth_ids
}
event_infos.append({
"event": e,
"auth_events": auth,
})
seen_ids.add(e.event_id)
yield self._handle_new_events(
origin,
event_infos,
outliers=True
)
try:
_, event_stream_id, max_stream_id = yield self._handle_new_event(
origin,
event,
state=state,
@@ -179,7 +184,7 @@ class FederationHandler(BaseHandler):
# it's probably a good idea to mark it as not in retry-state
# for sending (although this is a bit of a leap)
retry_timings = yield self.store.get_destination_retry_timings(origin)
if (retry_timings and retry_timings.retry_last_ts):
if retry_timings and retry_timings["retry_last_ts"]:
self.store.set_destination_retry_timings(origin, 0, 0)
room = yield self.store.get_room(event.room_id)
@@ -201,10 +206,20 @@ class FederationHandler(BaseHandler):
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
user = UserID.from_string(event.state_key)
@@ -212,38 +227,300 @@ class FederationHandler(BaseHandler):
"user_joined_room", user=user, room_id=event.room_id
)
@defer.inlineCallbacks
def _filter_events_for_server(self, server_name, room_id, events):
event_to_state = yield self.store.get_state_for_events(
room_id, frozenset(e.event_id for e in events),
types=(
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, None),
)
)
def redact_disallowed(event, state):
if not state:
return event
history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
if history:
visibility = history.content.get("history_visibility", "shared")
if visibility in ["invited", "joined"]:
# We now loop through all state events looking for
# membership states for the requesting server to determine
# if the server is either in the room or has been invited
# into the room.
for ev in state.values():
if ev.type != EventTypes.Member:
continue
try:
domain = UserID.from_string(ev.state_key).domain
except:
continue
if domain != server_name:
continue
memtype = ev.membership
if memtype == Membership.JOIN:
return event
elif memtype == Membership.INVITE:
if visibility == "invited":
return event
else:
return prune_event(event)
return event
defer.returnValue([
redact_disallowed(e, event_to_state[e.event_id])
for e in events
])
@log_function
@defer.inlineCallbacks
def backfill(self, dest, room_id, limit):
def backfill(self, dest, room_id, limit, extremities=[]):
""" Trigger a backfill request to `dest` for the given `room_id`
"""
if not extremities:
extremities = yield self.store.get_oldest_events_in_room(room_id)
events = yield self.replication_layer.backfill(
dest,
room_id,
limit=limit,
extremities=extremities,
)
event_map = {e.event_id: e for e in events}
event_ids = set(e.event_id for e in events)
edges = [
ev.event_id
for ev in events
if set(e_id for e_id, _ in ev.prev_events) - event_ids
]
logger.info(
"backfill: Got %d events with %d edges",
len(events), len(edges),
)
# For each edge get the current state.
auth_events = {}
state_events = {}
events_to_state = {}
for e_id in edges:
state, auth = yield self.replication_layer.get_state_for_room(
destination=dest,
room_id=room_id,
event_id=e_id
)
auth_events.update({a.event_id: a for a in auth})
auth_events.update({s.event_id: s for s in state})
state_events.update({s.event_id: s for s in state})
events_to_state[e_id] = state
seen_events = yield self.store.have_events(
set(auth_events.keys()) | set(state_events.keys())
)
all_events = events + state_events.values() + auth_events.values()
required_auth = set(
a_id for event in all_events for a_id, _ in event.auth_events
)
missing_auth = required_auth - set(auth_events)
results = yield defer.gatherResults(
[
self.replication_layer.get_pdu(
[dest],
event_id,
outlier=True,
timeout=10000,
)
for event_id in missing_auth
],
consumeErrors=True
).addErrback(unwrapFirstError)
auth_events.update({a.event_id: a for a in results})
ev_infos = []
for a in auth_events.values():
if a.event_id in seen_events:
continue
ev_infos.append({
"event": a,
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
for a_id, _ in a.auth_events
}
})
for e_id in events_to_state:
ev_infos.append({
"event": event_map[e_id],
"state": events_to_state[e_id],
"auth_events": {
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
for a_id, _ in event_map[e_id].auth_events
}
})
events.sort(key=lambda e: e.depth)
for event in events:
if event.event_id in events_to_state:
continue
ev_infos.append({
"event": event,
})
yield self._handle_new_events(
dest, ev_infos,
backfilled=True,
)
defer.returnValue(events)
@defer.inlineCallbacks
def maybe_backfill(self, room_id, current_depth):
"""Checks the database to see if we should backfill before paginating,
and if so do.
"""
extremities = yield self.store.get_oldest_events_with_depth_in_room(
room_id
)
if not extremities:
logger.debug("Not backfilling as no extremeties found.")
return
# Check if we reached a point where we should start backfilling.
sorted_extremeties_tuple = sorted(
extremities.items(),
key=lambda e: -int(e[1])
)
max_depth = sorted_extremeties_tuple[0][1]
if current_depth > max_depth:
logger.debug(
"Not backfilling as we don't need to. %d < %d",
max_depth, current_depth,
)
return
# Now we need to decide which hosts to hit first.
# First we try hosts that are already in the room
# TODO: HEURISTIC ALERT.
curr_state = yield self.state_handler.get_current_state(room_id)
def get_domains_from_state(state):
joined_users = [
(state_key, int(event.depth))
for (e_type, state_key), event in state.items()
if e_type == EventTypes.Member
and event.membership == Membership.JOIN
]
joined_domains = {}
for u, d in joined_users:
try:
dom = UserID.from_string(u).domain
old_d = joined_domains.get(dom)
if old_d:
joined_domains[dom] = min(d, old_d)
else:
joined_domains[dom] = d
except:
pass
return sorted(joined_domains.items(), key=lambda d: d[1])
curr_domains = get_domains_from_state(curr_state)
likely_domains = [
domain for domain, depth in curr_domains
if domain != self.server_name
]
@defer.inlineCallbacks
def try_backfill(domains):
# TODO: Should we try multiple of these at a time?
for dom in domains:
try:
events = yield self.backfill(
dom, room_id,
limit=100,
extremities=[e for e in extremities.keys()]
)
except SynapseError as e:
logger.info(
"Failed to backfill from %s because %s",
dom, e,
)
continue
except CodeMessageException as e:
if 400 <= e.code < 500:
raise
logger.info(
"Failed to backfill from %s because %s",
dom, e,
)
continue
except NotRetryingDestination as e:
logger.info(e.message)
continue
except Exception as e:
logger.exception(
"Failed to backfill from %s because %s",
dom, e,
)
continue
if events:
defer.returnValue(True)
defer.returnValue(False)
success = yield try_backfill(likely_domains)
if success:
defer.returnValue(True)
# Huh, well *those* domains didn't work out. Lets try some domains
# from the time.
tried_domains = set(likely_domains)
tried_domains.add(self.server_name)
event_ids = list(extremities.keys())
states = yield defer.gatherResults([
self.state_handler.resolve_state_groups(room_id, [e])
for e in event_ids
])
states = dict(zip(event_ids, [s[1] for s in states]))
for e_id, _ in sorted_extremeties_tuple:
likely_domains = get_domains_from_state(states[e_id])
success = yield try_backfill([
dom for dom in likely_domains
if dom not in tried_domains
])
if success:
defer.returnValue(True)
tried_domains.update(likely_domains)
defer.returnValue(False)
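# Worked example of the depth check above (numbers hypothetical): with
# oldest-event extremities {"$a": 10, "$b": 4}, the sorted tuple list is
# [("$a", 10), ("$b", 4)] and max_depth is 10. Paginating at
# current_depth 7 proceeds to pick candidate domains and backfill, while
# current_depth 12 (> max_depth) returns early without backfilling.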
@defer.inlineCallbacks
def send_invite(self, target_host, event):
""" Sends the invite to the remote server for signing.
@@ -290,6 +567,8 @@ class FederationHandler(BaseHandler):
"""
logger.debug("Joining %s to %s", joinee, room_id)
yield self.store.clean_room_for_join(room_id)
origin, pdu = yield self.replication_layer.make_join(
target_hosts,
room_id,
@@ -370,46 +649,22 @@ class FederationHandler(BaseHandler):
# FIXME
pass
for e in auth_chain:
e.internal_metadata.outlier = True
if e.event_id == event.event_id:
continue
try:
auth_ids = [e_id for e_id, _ in e.auth_events]
auth = {
(e.type, e.state_key): e for e in auth_chain
if e.event_id in auth_ids
}
yield self._handle_new_event(
origin, e, auth_events=auth
)
except:
logger.exception(
"Failed to handle auth event %s",
e.event_id,
)
for e in state:
ev_infos = []
for e in itertools.chain(state, auth_chain):
if e.event_id == event.event_id:
continue
e.internal_metadata.outlier = True
try:
auth_ids = [e_id for e_id, _ in e.auth_events]
auth = {
ev_infos.append({
"event": e,
"auth_events": {
(e.type, e.state_key): e for e in auth_chain
if e.event_id in auth_ids
}
yield self._handle_new_event(
origin, e, auth_events=auth
)
except:
logger.exception(
"Failed to handle state event %s",
e.event_id,
)
})
yield self._handle_new_events(origin, ev_infos, outliers=True)
auth_ids = [e_id for e_id, _ in event.auth_events]
auth_events = {
@@ -417,7 +672,7 @@ class FederationHandler(BaseHandler):
if e.event_id in auth_ids
}
yield self._handle_new_event(
_, event_stream_id, max_stream_id = yield self._handle_new_event(
origin,
new_event,
state=state,
@@ -425,10 +680,20 @@ class FederationHandler(BaseHandler):
auth_events=auth_events,
)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
new_event, event_stream_id, max_stream_id,
extra_users=[joinee]
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
new_event.event_id, f.value
)
d.addErrback(log_failure)
logger.debug("Finished joining %s to %s", joinee, room_id)
finally:
room_queue = self.room_queues[room_id]
@@ -464,11 +729,9 @@ class FederationHandler(BaseHandler):
builder=builder,
)
self.auth.check(event, auth_events=context.current_state)
defer.returnValue(event)
@defer.inlineCallbacks
@log_function
@@ -486,7 +749,9 @@ class FederationHandler(BaseHandler):
event.internal_metadata.outlier = False
context, event_stream_id, max_stream_id = yield self._handle_new_event(
origin, event
)
logger.debug(
"on_send_join_request: After _handle_new_event: %s, sigs: %s",
@@ -500,10 +765,19 @@ class FederationHandler(BaseHandler):
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.JOIN:
user = UserID.from_string(event.state_key)
@@ -567,23 +841,34 @@ class FederationHandler(BaseHandler):
context = yield self.state_handler.compute_event_context(event)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=False,
)
target_user = UserID.from_string(event.state_key)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[target_user],
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
defer.returnValue(event)
@defer.inlineCallbacks
def get_state_for_pdu(self, origin, room_id, event_id):
def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True):
yield run_on_reactor()
if do_auth:
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
@@ -636,6 +921,8 @@ class FederationHandler(BaseHandler):
limit
)
events = yield self._filter_events_for_server(origin, room_id, events)
defer.returnValue(events)
@defer.inlineCallbacks
@@ -694,31 +981,72 @@ class FederationHandler(BaseHandler):
def _handle_new_event(self, origin, event, state=None, backfilled=False,
current_state=None, auth_events=None):
outlier = event.internal_metadata.is_outlier()
context = yield self._prep_event(
origin, event,
state=state,
backfilled=backfilled,
current_state=current_state,
auth_events=auth_events,
)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
is_new_state=(not outlier and not backfilled),
current_state=current_state,
)
defer.returnValue((context, event_stream_id, max_stream_id))
@defer.inlineCallbacks
def _handle_new_events(self, origin, event_infos, backfilled=False,
outliers=False):
contexts = yield defer.gatherResults(
[
self._prep_event(
origin,
ev_info["event"],
state=ev_info.get("state"),
backfilled=backfilled,
auth_events=ev_info.get("auth_events"),
)
for ev_info in event_infos
]
)
yield self.store.persist_events(
[
(ev_info["event"], context)
for ev_info, context in itertools.izip(event_infos, contexts)
],
backfilled=backfilled,
is_new_state=(not outliers and not backfilled),
)
@defer.inlineCallbacks
def _prep_event(self, origin, event, state=None, backfilled=False,
current_state=None, auth_events=None):
outlier = event.internal_metadata.is_outlier()
context = yield self.state_handler.compute_event_context(
event, old_state=state
event, old_state=state, outlier=outlier,
)
if not auth_events:
auth_events = context.current_state
# This is a hack to fix some old rooms where the initial join event
# didn't reference the create event in its auth events.
if event.type == EventTypes.Member and not event.auth_events:
if len(event.prev_events) == 1 and event.depth < 5:
c = yield self.store.get_event(
event.prev_events[0][0],
allow_none=True,
)
if c and c.type == EventTypes.Create:
auth_events[(c.type, c.state_key)] = c
try:
@@ -733,25 +1061,6 @@ class FederationHandler(BaseHandler):
context.rejected = RejectedReason.AUTH_ERROR
# FIXME: Don't store as rejected with AUTH_ERROR if we haven't
# seen all the auth events.
yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
is_new_state=False,
current_state=current_state,
)
raise
yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
is_new_state=(is_new_state and not backfilled),
current_state=current_state,
)
defer.returnValue(context)
@defer.inlineCallbacks
@@ -788,18 +1097,51 @@ class FederationHandler(BaseHandler):
defer.returnValue(ret)
@defer.inlineCallbacks
def on_get_missing_events(self, origin, room_id, earliest_events,
latest_events, limit, min_depth):
in_room = yield self.auth.check_host_in_room(
room_id,
origin
)
if not in_room:
raise AuthError(403, "Host not in room.")
limit = min(limit, 20)
min_depth = max(min_depth, 0)
missing_events = yield self.store.get_missing_events(
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
min_depth=min_depth,
)
defer.returnValue(missing_events)
@defer.inlineCallbacks
@log_function
def do_auth(self, origin, event, context, auth_events):
# Check if we have all the auth events.
current_state = set(e.event_id for e in auth_events.values())
event_auth_events = set(e_id for e_id, _ in event.auth_events)
if event_auth_events - current_state:
have_events = yield self.store.have_events(
event_auth_events - current_state
)
else:
have_events = {}
have_events.update({
e.event_id: ""
for e in auth_events.values()
})
seen_events = set(have_events.keys())
missing_auth = event_auth_events - seen_events - current_state
if missing_auth:
logger.info("Missing auth: %s", missing_auth)
@@ -869,13 +1211,13 @@ class FederationHandler(BaseHandler):
if d in have_events and not have_events[d]
],
consumeErrors=True
)
).addErrback(unwrapFirstError)
if different_events:
local_view = dict(auth_events)
remote_view = dict(auth_events)
remote_view.update({
(d.type, d.state_key): d for d in different_events
})
new_state, prev_state = self.state_handler.resolve_events(
@@ -1114,3 +1456,52 @@ class FederationHandler(BaseHandler):
},
"missing": [e.event_id for e in missing_locals],
})
@defer.inlineCallbacks
def _handle_auth_events(self, origin, auth_events):
auth_ids_to_deferred = {}
def process_auth_ev(ev):
auth_ids = [e_id for e_id, _ in ev.auth_events]
prev_ds = [
auth_ids_to_deferred[i]
for i in auth_ids
if i in auth_ids_to_deferred
]
d = defer.Deferred()
auth_ids_to_deferred[ev.event_id] = d
@defer.inlineCallbacks
def f(*_):
ev.internal_metadata.outlier = True
try:
auth = {
(e.type, e.state_key): e for e in auth_events
if e.event_id in auth_ids
}
yield self._handle_new_event(
origin, ev, auth_events=auth
)
except:
logger.exception(
"Failed to handle auth event %s",
ev.event_id,
)
d.callback(None)
if prev_ds:
dx = defer.DeferredList(prev_ds)
dx.addBoth(f)
else:
f()
for e in auth_events:
process_auth_ev(e)
yield defer.DeferredList(auth_ids_to_deferred.values())


@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for interacting with Identity Servers"""
from twisted.internet import defer
from synapse.api.errors import (
CodeMessageException
)
from ._base import BaseHandler
from synapse.http.client import SimpleHttpClient
from synapse.util.async import run_on_reactor
from synapse.api.errors import SynapseError
import json
import logging
logger = logging.getLogger(__name__)
class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
yield run_on_reactor()
# TODO: get this from the homeserver rather than creating a new one for
# each request
http_client = SimpleHttpClient(self.hs)
# XXX: make this configurable!
# trustedIdServers = ['matrix.org', 'localhost:8090']
trustedIdServers = ['matrix.org', 'vector.im']
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
if id_server not in trustedIdServers:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server)
defer.returnValue(None)
data = {}
try:
data = yield http_client.get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/3pid/getValidated3pid"
),
{'sid': creds['sid'], 'client_secret': client_secret}
)
except CodeMessageException as e:
data = json.loads(e.msg)
if 'medium' in data:
defer.returnValue(data)
defer.returnValue(None)
@defer.inlineCallbacks
def bind_threepid(self, creds, mxid):
yield run_on_reactor()
logger.debug("binding threepid %r to %s", creds, mxid)
http_client = SimpleHttpClient(self.hs)
data = None
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
try:
data = yield http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server, "/_matrix/identity/api/v1/3pid/bind"
),
{
'sid': creds['sid'],
'client_secret': client_secret,
'mxid': mxid,
}
)
logger.debug("bound threepid %r to %s", creds, mxid)
except CodeMessageException as e:
data = json.loads(e.msg)
defer.returnValue(data)
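For reference, the creds mapping that both methods above expect looks like the following (values are illustrative; the legacy camelCase spellings idServer/clientSecret are accepted too, as handled above):

creds = {
    "id_server": "matrix.org",  # must appear in trustedIdServers
    "client_secret": "d0n7t3ll",  # hypothetical per-validation secret
    "sid": "1234",  # session id issued by the identity server
}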


@@ -1,116 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.errors import LoginError, Codes, CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.util.emailutils import EmailException
import synapse.util.emailutils as emailutils
import bcrypt
import json
import logging
logger = logging.getLogger(__name__)
class LoginHandler(BaseHandler):
def __init__(self, hs):
super(LoginHandler, self).__init__(hs)
self.hs = hs
@defer.inlineCallbacks
def login(self, user, password):
"""Login as the specified user with the specified password.
Args:
user (str): The user ID.
password (str): The password.
Returns:
The newly allocated access token.
Raises:
StoreError if there was a problem storing the token.
LoginError if there was an authentication problem.
"""
# TODO do this better, it can't go in __init__ else it cyclic loops
if not hasattr(self, "reg_handler"):
self.reg_handler = self.hs.get_handlers().registration_handler
# pull out the hash for this user if they exist
user_info = yield self.store.get_user_by_id(user_id=user)
if not user_info:
logger.warn("Attempted to login as %s but they do not exist", user)
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
stored_hash = user_info[0]["password_hash"]
if bcrypt.checkpw(password, stored_hash):
# generate an access token and store it.
token = self.reg_handler._generate_token(user)
logger.info("Adding token %s for user %s", token, user)
yield self.store.add_access_token_to_user(user, token)
defer.returnValue(token)
else:
logger.warn("Failed password login for user %s", user)
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
@defer.inlineCallbacks
def reset_password(self, user_id, email):
is_valid = yield self._check_valid_association(user_id, email)
logger.info("reset_password user=%s email=%s valid=%s", user_id, email,
is_valid)
if is_valid:
try:
# send an email out
emailutils.send_email(
smtp_server=self.hs.config.email_smtp_server,
from_addr=self.hs.config.email_from_address,
to_addr=email,
subject="Password Reset",
body="TODO."
)
except EmailException as e:
logger.exception(e)
@defer.inlineCallbacks
def _check_valid_association(self, user_id, email):
identity = yield self._query_email(email)
if identity and "mxid" in identity:
if identity["mxid"] == user_id:
defer.returnValue(True)
return
defer.returnValue(False)
@defer.inlineCallbacks
def _query_email(self, email):
http_client = SimpleHttpClient(self.hs)
try:
data = yield http_client.get_json(
# TODO FIXME This should be configurable.
# XXX: ID servers need to use HTTPS
"http://%s%s" % (
"matrix.org:8090", "/_matrix/identity/api/v1/lookup"
),
{
'medium': 'email',
'address': email
}
)
defer.returnValue(data)
except CodeMessageException as e:
data = json.loads(e.msg)
defer.returnValue(data)


@@ -20,8 +20,9 @@ from synapse.api.errors import RoomError, SynapseError
from synapse.streams.config import PaginationConfig
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import UserID
from synapse.types import UserID, RoomStreamToken
from ._base import BaseHandler
@@ -89,7 +90,17 @@ class MessageHandler(BaseHandler):
if not pagin_config.from_token:
pagin_config.from_token = (
yield self.hs.get_event_sources().get_current_token(
direction='b'
)
)
room_token = RoomStreamToken.parse(pagin_config.from_token.room_key)
if room_token.topological is None:
raise SynapseError(400, "Invalid token")
yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, room_token.topological
)
user = UserID.from_string(user_id)
@@ -102,11 +113,21 @@ class MessageHandler(BaseHandler):
"room_key", next_key
)
if not events:
defer.returnValue({
"chunk": [],
"start": pagin_config.from_token.to_string(),
"end": next_token.to_string(),
})
events = yield self._filter_events_for_client(user_id, room_id, events)
time_now = self.clock.time_msec()
chunk = {
"chunk": [
serialize_event(e, time_now, as_client_event) for e in events
serialize_event(e, time_now, as_client_event)
for e in events
],
"start": pagin_config.from_token.to_string(),
"end": next_token.to_string(),
@@ -114,6 +135,52 @@ class MessageHandler(BaseHandler):
defer.returnValue(chunk)
@defer.inlineCallbacks
def _filter_events_for_client(self, user_id, room_id, events):
event_id_to_state = yield self.store.get_state_for_events(
room_id, frozenset(e.event_id for e in events),
types=(
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
)
)
def allowed(event, state):
if event.type == EventTypes.RoomHistoryVisibility:
return True
membership_ev = state.get((EventTypes.Member, user_id), None)
if membership_ev:
membership = membership_ev.membership
else:
membership = Membership.LEAVE
if membership == Membership.JOIN:
return True
history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
if history:
visibility = history.content.get("history_visibility", "shared")
else:
visibility = "shared"
if visibility == "public":
return True
elif visibility == "shared":
return True
elif visibility == "joined":
return membership == Membership.JOIN
elif visibility == "invited":
return membership == Membership.INVITE
return True
defer.returnValue([
event
for event in events
if allowed(event, event_id_to_state[event.event_id])
])
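The allowed() predicate above condenses to these rules — a hedged pure-function restatement, not part of the diff:

def may_see(membership, visibility):
    # Current members always see events; history_visibility then widens
    # or narrows access for everyone else.
    if membership == Membership.JOIN:
        return True
    if visibility in ("public", "shared"):
        return True
    if visibility == "joined":
        return False  # only current members, handled above
    if visibility == "invited":
        return membership == Membership.INVITE
    return True  # unrecognised values fall back to allowed, as above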
@defer.inlineCallbacks
def create_and_send_event(self, event_dict, ratelimit=True,
client=None, txn_id=None):
@@ -267,14 +334,19 @@ class MessageHandler(BaseHandler):
user, pagination_config.get_source_config("presence"), None
)
receipt_stream = self.hs.get_event_sources().sources["receipt"]
receipt, _ = yield receipt_stream.get_pagination_rows(
user, pagination_config.get_source_config("receipt"), None
)
public_room_ids = yield self.store.get_public_room_ids()
limit = pagin_config.limit
if limit is None:
limit = 10
for event in room_list:
@defer.inlineCallbacks
def handle_room(event):
d = {
"room_id": event.room_id,
"membership": event.membership,
@@ -290,12 +362,23 @@ class MessageHandler(BaseHandler):
rooms_ret.append(d)
if event.membership != Membership.JOIN:
continue
return
try:
messages, token = yield self.store.get_recent_events_for_room(
(messages, token), current_state = yield defer.gatherResults(
[
self.store.get_recent_events_for_room(
event.room_id,
limit=limit,
end_token=now_token.room_key,
),
self.state_handler.get_current_state(
event.room_id
),
]
).addErrback(unwrapFirstError)
messages = yield self._filter_events_for_client(
user_id, event.room_id, messages
)
start_token = now_token.copy_and_replace("room_key", token[0])
@@ -311,9 +394,6 @@ class MessageHandler(BaseHandler):
"end": end_token.to_string(),
}
current_state = yield self.state_handler.get_current_state(
event.room_id
)
d["state"] = [
serialize_event(c, time_now, as_client_event)
for c in current_state.values()
@@ -321,10 +401,20 @@ class MessageHandler(BaseHandler):
except:
logger.exception("Failed to get snapshot")
# Only do N rooms at once
n = 5
d_list = [handle_room(e) for e in room_list]
for i in range(0, len(d_list), n):
yield defer.gatherResults(
d_list[i:i + n],
consumeErrors=True
).addErrback(unwrapFirstError)
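The loop above fans out handle_room() in windows of n = 5. One caveat: an @defer.inlineCallbacks function begins executing as soon as it is called, so building d_list up front starts every room's work immediately and only the waiting is batched. A hedged helper that genuinely caps concurrency by hiding the calls behind factories:

from twisted.internet import defer

@defer.inlineCallbacks
def gather_in_batches(deferred_factories, batch_size=5):
    # Each factory is a zero-argument callable returning a Deferred;
    # only batch_size of them are started and awaited at a time.
    results = []
    for i in range(0, len(deferred_factories), batch_size):
        batch = [f() for f in deferred_factories[i:i + batch_size]]
        batch_results = yield defer.gatherResults(batch, consumeErrors=True)
        results.extend(batch_results)
    defer.returnValue(results)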
ret = {
"rooms": rooms_ret,
"presence": presence,
"end": now_token.to_string()
"receipts": receipt,
"end": now_token.to_string(),
}
defer.returnValue(ret)
@@ -360,15 +450,6 @@ class MessageHandler(BaseHandler):
if limit is None:
limit = 10
messages, token = yield self.store.get_recent_events_for_room(
room_id,
limit=limit,
end_token=now_token.room_key,
)
start_token = now_token.copy_and_replace("room_key", token[0])
end_token = now_token.copy_and_replace("room_key", token[1])
room_members = [
m for m in current_state.values()
if m.type == EventTypes.Member
@@ -376,19 +457,39 @@ class MessageHandler(BaseHandler):
]
presence_handler = self.hs.get_handlers().presence_handler
presence = []
for m in room_members:
try:
member_presence = yield presence_handler.get_state(
target_user=UserID.from_string(m.user_id),
@defer.inlineCallbacks
def get_presence():
states = yield presence_handler.get_states(
target_users=[UserID.from_string(m.user_id) for m in room_members],
auth_user=auth_user,
as_event=True,
check_auth=False,
)
presence.append(member_presence)
except SynapseError:
logger.exception(
"Failed to get member presence of %r", m.user_id
defer.returnValue(states.values())
receipts_handler = self.hs.get_handlers().receipts_handler
presence, receipts, (messages, token) = yield defer.gatherResults(
[
get_presence(),
receipts_handler.get_receipts_for_room(room_id, now_token.receipt_key),
self.store.get_recent_events_for_room(
room_id,
limit=limit,
end_token=now_token.room_key,
)
],
consumeErrors=True,
).addErrback(unwrapFirstError)
messages = yield self._filter_events_for_client(
user_id, room_id, messages
)
start_token = now_token.copy_and_replace("room_key", token[0])
end_token = now_token.copy_and_replace("room_key", token[1])
time_now = self.clock.time_msec()
@@ -401,5 +502,6 @@ class MessageHandler(BaseHandler):
"end": end_token.to_string(),
},
"state": state,
"presence": presence
"presence": presence,
"receipts": receipts,
})

View File

@@ -18,9 +18,10 @@ from twisted.internet import defer
from synapse.api.errors import SynapseError, AuthError
from synapse.api.constants import PresenceState
from synapse.util.logutils import log_function
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.types import UserID
import synapse.metrics
from ._base import BaseHandler
@@ -29,6 +30,15 @@ import logging
logger = logging.getLogger(__name__)
metrics = synapse.metrics.get_metrics_for(__name__)
# Don't bother bumping "last active" time if it differs by less than 60 seconds
LAST_ACTIVE_GRANULARITY = 60*1000
# Keep no more than this number of offline serial revisions
MAX_OFFLINE_SERIALS = 1000
# TODO(paul): Maybe there's one of these I can steal from somewhere
def partition(l, func):
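The hunk cuts partition() off at its def line, but partitionbool() is unpacked by get_states and push_update_to_local_and_remote later in this diff, so a reconstruction consistent with those call sites is:

def partition(l, func):
    """Bucket the elements of l by the value func returns for each."""
    ret = {}
    for x in l:
        ret.setdefault(func(x), []).append(x)
    return ret

def partitionbool(l, func):
    # Callers unpack two lists: (elements matching func, the rest).
    ret = partition(l, lambda x: bool(func(x)))
    return ret.get(True, []), ret.get(False, [])

# e.g. local_users, remote_users = partitionbool(users, hs.is_mine)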
@@ -128,11 +138,23 @@ class PresenceHandler(BaseHandler):
self._remote_sendmap = {}
# map remote users to sets of local users who're interested in them
self._remote_recvmap = {}
# list of (serial, set of user_ids) tuples, ordered by serial, latest
# first
self._remote_offline_serials = []
# map any user to a UserPresenceCache
self._user_cachemap = {}
self._user_cachemap_latest_serial = 0
# map room_ids to the latest presence serial for a member of that
# room
self._room_serials = {}
metrics.register_callback(
"userCachemap:size",
lambda: len(self._user_cachemap),
)
def _get_or_make_usercache(self, user):
"""If the cache entry doesn't exist, initialise a new one."""
if user not in self._user_cachemap:
@@ -169,8 +191,23 @@ class PresenceHandler(BaseHandler):
defer.returnValue(False)
@defer.inlineCallbacks
def get_state(self, target_user, auth_user, as_event=False):
def get_state(self, target_user, auth_user, as_event=False, check_auth=True):
"""Get the current presence state of the given user.
Args:
target_user (UserID): The user whose presence we want
auth_user (UserID): The user requesting the presence, used for
checking if said user is allowed to see the presence of the
`target_user`
as_event (bool): Format the return as an event or not?
check_auth (bool): Perform the auth checks or not?
Returns:
dict: The presence state of the `target_user`, whose format depends
on the `as_event` argument.
"""
if self.hs.is_mine(target_user):
if check_auth:
visible = yield self.is_presence_visible(
observer_user=auth_user,
observed_user=target_user
@@ -178,15 +215,14 @@ class PresenceHandler(BaseHandler):
if not visible:
raise SynapseError(404, "Presence information not visible")
if target_user in self._user_cachemap:
state = self._user_cachemap[target_user].get_state()
else:
state = yield self.store.get_presence_state(target_user.localpart)
if "mtime" in state:
del state["mtime"]
state["presence"] = state.pop("state")
if target_user in self._user_cachemap:
cached_state = self._user_cachemap[target_user].get_state()
if "last_active" in cached_state:
state["last_active"] = cached_state["last_active"]
else:
# TODO(paul): Have remote server send us permissions set
state = self._get_or_offline_usercache(target_user).get_state()
@@ -210,6 +246,81 @@ class PresenceHandler(BaseHandler):
else:
defer.returnValue(state)
@defer.inlineCallbacks
def get_states(self, target_users, auth_user, as_event=False, check_auth=True):
"""A batched version of the `get_state` method that accepts a list of
`target_users`
Args:
target_users (list): The list of UserIDs whose presence we want
auth_user (UserID): The user requesting the presence, used for
checking if said user is allowed to see the presence of the
`target_users`
as_event (bool): Format the return as an event or not?
check_auth (bool): Perform the auth checks or not?
Returns:
dict: A mapping from user -> presence_state
"""
local_users, remote_users = partitionbool(
target_users,
lambda u: self.hs.is_mine(u)
)
if check_auth:
for user in local_users:
visible = yield self.is_presence_visible(
observer_user=auth_user,
observed_user=user
)
if not visible:
raise SynapseError(404, "Presence information not visible")
results = {}
if local_users:
for user in local_users:
if user in self._user_cachemap:
results[user] = self._user_cachemap[user].get_state()
local_to_user = {u.localpart: u for u in local_users}
states = yield self.store.get_presence_states(
[u.localpart for u in local_users if u not in results]
)
for local_part, state in states.items():
if state is None:
continue
res = {"presence": state["state"]}
if "status_msg" in state and state["status_msg"]:
res["status_msg"] = state["status_msg"]
results[local_to_user[local_part]] = res
for user in remote_users:
# TODO(paul): Have remote server send us permissions set
results[user] = self._get_or_offline_usercache(user).get_state()
for state in results.values():
if "last_active" in state:
state["last_active_ago"] = int(
self.clock.time_msec() - state.pop("last_active")
)
if as_event:
for user, state in results.items():
content = state
content["user_id"] = user.to_string()
if "last_active" in content:
content["last_active_ago"] = int(
self._clock.time_msec() - content.pop("last_active")
)
results[user] = {"type": "m.presence", "content": content}
defer.returnValue(results)
@defer.inlineCallbacks
@log_function
def set_state(self, target_user, auth_user, state):
@@ -260,7 +371,6 @@ class PresenceHandler(BaseHandler):
now_online = state["presence"] != PresenceState.OFFLINE
was_polling = target_user in self._user_cachemap
with PreserveLoggingContext():
if now_online and not was_polling:
self.start_polling_presence(target_user, state=state)
elif not now_online and was_polling:
@@ -274,15 +384,40 @@ class PresenceHandler(BaseHandler):
if now is None:
now = self.clock.time_msec()
prev_state = self._get_or_make_usercache(user)
if now - prev_state.state.get("last_active", 0) < LAST_ACTIVE_GRANULARITY:
return
self.changed_presencelike_data(user, {"last_active": now})
def get_joined_rooms_for_user(self, user):
"""Get the list of rooms a user is joined to.
Args:
user(UserID): The user.
Returns:
A Deferred of a list of room id strings.
"""
rm_handler = self.homeserver.get_handlers().room_member_handler
return rm_handler.get_joined_rooms_for_user(user)
def get_joined_users_for_room_id(self, room_id):
rm_handler = self.homeserver.get_handlers().room_member_handler
return rm_handler.get_room_members(room_id)
@defer.inlineCallbacks
def changed_presencelike_data(self, user, state):
statuscache = self._get_or_make_usercache(user)
"""Updates the presence state of a local user.
Args:
user(UserID): The user being updated.
state(dict): The new presence state for the user.
Returns:
A Deferred
"""
self._user_cachemap_latest_serial += 1
statuscache.update(state, serial=self._user_cachemap_latest_serial)
return self.push_presence(user, statuscache=statuscache)
statuscache = yield self.update_presence_cache(user, state)
yield self.push_presence(user, statuscache=statuscache)
@log_function
def started_user_eventstream(self, user):
@@ -296,14 +431,21 @@ class PresenceHandler(BaseHandler):
@defer.inlineCallbacks
def user_joined_room(self, user, room_id):
if self.hs.is_mine(user):
statuscache = self._get_or_make_usercache(user)
"""Called via the distributor whenever a user joins a room.
Notifies the new member of the presence of the current members.
Notifies the current members of the room of the new member's presence.
Args:
user(UserID): The user who joined the room.
room_id(str): The room id the user joined.
"""
if self.hs.is_mine(user):
# No actual update but we need to bump the serial anyway for the
# event source
self._user_cachemap_latest_serial += 1
statuscache.update({}, serial=self._user_cachemap_latest_serial)
statuscache = yield self.update_presence_cache(
user, room_ids=[room_id]
)
self.push_update_to_local_and_remote(
observed_user=user,
room_ids=[room_id],
@@ -311,18 +453,22 @@ class PresenceHandler(BaseHandler):
)
# We also want to tell them about current presence of people.
rm_handler = self.homeserver.get_handlers().room_member_handler
curr_users = yield rm_handler.get_room_members(room_id)
curr_users = yield self.get_joined_users_for_room_id(room_id)
for local_user in [c for c in curr_users if self.hs.is_mine(c)]:
statuscache = yield self.update_presence_cache(
local_user, room_ids=[room_id], add_to_cache=False
)
self.push_update_to_local_and_remote(
observed_user=local_user,
users_to_push=[user],
statuscache=self._get_or_offline_usercache(local_user),
statuscache=statuscache,
)
@defer.inlineCallbacks
def send_invite(self, observer_user, observed_user):
"""Request the presence of a local or remote user for a local user"""
if not self.hs.is_mine(observer_user):
raise SynapseError(400, "User is not hosted on this Home Server")
@@ -357,6 +503,15 @@ class PresenceHandler(BaseHandler):
@defer.inlineCallbacks
def invite_presence(self, observed_user, observer_user):
"""Handles a m.presence_invite EDU. A remote or local user has
requested presence updates for a local user. If the invite is accepted
then allow the local or remote user to see the presence of the local
user.
Args:
observed_user(UserID): The local user whose presence is requested.
observer_user(UserID): The remote or local user requesting presence.
"""
accept = yield self._should_accept_invite(observed_user, observer_user)
if accept:
@@ -383,16 +538,34 @@ class PresenceHandler(BaseHandler):
@defer.inlineCallbacks
def accept_presence(self, observed_user, observer_user):
"""Handles a m.presence_accept EDU. Mark a presence invite from a
local or remote user as accepted in a local user's presence list.
Starts polling for presence updates from the local or remote user.
Args:
observed_user(UserID): The user to update in the presence list.
observer_user(UserID): The owner of the presence list to update.
"""
yield self.store.set_presence_list_accepted(
observer_user.localpart, observed_user.to_string()
)
with PreserveLoggingContext():
self.start_polling_presence(
observer_user, target_user=observed_user
)
@defer.inlineCallbacks
def deny_presence(self, observed_user, observer_user):
"""Handle a m.presence_deny EDU. Removes a local or remote user from a
local user's presence list.
Args:
observed_user(UserID): The local or remote user to remove from the
list.
observer_user(UserID): The local owner of the presence list.
Returns:
A Deferred.
"""
yield self.store.del_presence_list(
observer_user.localpart, observed_user.to_string()
)
@@ -401,6 +574,16 @@ class PresenceHandler(BaseHandler):
@defer.inlineCallbacks
def drop(self, observed_user, observer_user):
"""Remove a local or remote user from a local user's presence list and
unsubscribe the local user from updates from that user.
Args:
observed_user(UserID): The local or remote user to remove from the
list.
observer_user(UserID): The local owner of the presence list.
Returns:
A Deferred.
"""
if not self.hs.is_mine(observer_user):
raise SynapseError(400, "User is not hosted on this Home Server")
@@ -408,34 +591,66 @@ class PresenceHandler(BaseHandler):
observer_user.localpart, observed_user.to_string()
)
with PreserveLoggingContext():
self.stop_polling_presence(
observer_user, target_user=observed_user
)
@defer.inlineCallbacks
def get_presence_list(self, observer_user, accepted=None):
"""Get the presence list for a local user. The retured list includes
the current presence state for each user listed.
Args:
observer_user(UserID): The local user whose presence list to fetch.
accepted(bool or None): If not None, only include users who have
(True) or have not (False) accepted the presence invite request.
Returns:
A Deferred list of presence state events.
"""
if not self.hs.is_mine(observer_user):
raise SynapseError(400, "User is not hosted on this Home Server")
presence = yield self.store.get_presence_list(
presence_list = yield self.store.get_presence_list(
observer_user.localpart, accepted=accepted
)
for p in presence:
observed_user = UserID.from_string(p.pop("observed_user_id"))
p["observed_user"] = observed_user
p.update(self._get_or_offline_usercache(observed_user).get_state())
if "last_active" in p:
p["last_active_ago"] = int(
self.clock.time_msec() - p.pop("last_active")
results = []
for row in presence_list:
observed_user = UserID.from_string(row["observed_user_id"])
result = {
"observed_user": observed_user, "accepted": row["accepted"]
}
result.update(
self._get_or_offline_usercache(observed_user).get_state()
)
if "last_active" in result:
result["last_active_ago"] = int(
self.clock.time_msec() - result.pop("last_active")
)
results.append(result)
defer.returnValue(presence)
defer.returnValue(results)
@defer.inlineCallbacks
@log_function
def start_polling_presence(self, user, target_user=None, state=None):
"""Subscribe a local user to presence updates from a local or remote
user. If no target_user is supplied then subscribe to all users stored
in the presence list for the local user.
Additionally this pushes the current presence state of this user to all
target_users. That state can be provided directly or will be read from
the stored state for the local user.
Also this attempts to notify the local user of the current state of
any local target users.
Args:
user(UserID): The local user that wishes for presence updates.
target_user(UserID): The local or remote user whose updates are
wanted.
state(dict): Optional presence state for the local user.
"""
logger.debug("Start polling for presence from %s", user)
if target_user:
@@ -451,8 +666,7 @@ class PresenceHandler(BaseHandler):
# Also include people in all my rooms
rm_handler = self.homeserver.get_handlers().room_member_handler
room_ids = yield rm_handler.get_rooms_for_user(user)
room_ids = yield self.get_joined_rooms_for_user(user)
if state is None:
state = yield self.store.get_presence_state(user.localpart)
@@ -476,9 +690,7 @@ class PresenceHandler(BaseHandler):
# We want to tell the person that just came online
# presence state of people they are interested in?
self.push_update_to_clients(
observed_user=target_user,
users_to_push=[user],
statuscache=self._get_or_offline_usercache(target_user),
)
deferreds = []
@@ -495,6 +707,12 @@ class PresenceHandler(BaseHandler):
yield defer.DeferredList(deferreds, consumeErrors=True)
def _start_polling_local(self, user, target_user):
"""Subscribe a local user to presence updates for a local user
Args:
user(UserID): The local user that wishes for updates.
target_user(UserID): The local user whose updates are wanted.
"""
target_localpart = target_user.localpart
if target_localpart not in self._local_pushmap:
@@ -503,6 +721,17 @@ class PresenceHandler(BaseHandler):
self._local_pushmap[target_localpart].add(user)
def _start_polling_remote(self, user, domain, remoteusers):
"""Subscribe a local user to presence updates for remote users on a
given remote domain.
Args:
user(UserID): The local user that wishes for updates.
domain(str): The remote server the local user wants updates from.
remoteusers([UserID]): The remote users that the local user wants to
be told about.
Returns:
A Deferred.
"""
to_poll = set()
for u in remoteusers:
@@ -523,6 +752,17 @@ class PresenceHandler(BaseHandler):
@log_function
def stop_polling_presence(self, user, target_user=None):
"""Unsubscribe a local user from presence updates from a local or
remote user. If no target user is supplied then unsubscribe the user
from all presence updates that the user had subscribed to.
Args:
user(UserID): The local user that no longer wishes for updates.
target_user(UserID or None): The user whose updates are no longer
wanted.
Returns:
A Deferred.
"""
logger.debug("Stop polling for presence from %s", user)
if not target_user or self.hs.is_mine(target_user):
@@ -551,6 +791,13 @@ class PresenceHandler(BaseHandler):
return defer.DeferredList(deferreds, consumeErrors=True)
def _stop_polling_local(self, user, target_user):
"""Unsubscribe a local user from presence updates from a local user on
this server.
Args:
user(UserID): The local user that no longer wishes for updates.
target_user(UserID): The user whose updates are no longer wanted.
"""
for localpart in self._local_pushmap.keys():
if target_user and localpart != target_user.localpart:
continue
@@ -563,6 +810,17 @@ class PresenceHandler(BaseHandler):
@log_function
def _stop_polling_remote(self, user, domain, remoteusers):
"""Unsubscribe a local user from presence updates from remote users on
a given domain.
Args:
user(UserID): The local user that no longer wishes for updates.
domain(str): The remote server to unsubscribe from.
remoteusers([UserID]): The users on that remote server that the
local user no longer wishes to be updated about.
Returns:
A Deferred.
"""
to_unpoll = set()
for u in remoteusers:
@@ -584,6 +842,19 @@ class PresenceHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
def push_presence(self, user, statuscache):
"""
Notify local and remote users of a change in presence of a local user.
Pushes the update to local clients and remote domains that are directly
subscribed to the presence of the local user.
Also pushes that update to any local user or remote domain that shares
a room with the local user.
Args:
user(UserID): The local user whose presence was updated.
statuscache(UserPresenceCache): Cache of the user's presence state
Returns:
A Deferred.
"""
assert(self.hs.is_mine(user))
logger.debug("Pushing presence update from %s", user)
@@ -595,8 +866,7 @@ class PresenceHandler(BaseHandler):
# and also user is informed of server-forced pushes
localusers.add(user)
rm_handler = self.homeserver.get_handlers().room_member_handler
room_ids = yield rm_handler.get_rooms_for_user(user)
room_ids = yield self.get_joined_rooms_for_user(user)
if not localusers and not room_ids:
defer.returnValue(None)
@@ -610,45 +880,24 @@ class PresenceHandler(BaseHandler):
)
yield self.distributor.fire("user_presence_changed", user, statuscache)
@defer.inlineCallbacks
def _push_presence_remote(self, user, destination, state=None):
if state is None:
state = yield self.store.get_presence_state(user.localpart)
del state["mtime"]
state["presence"] = state.pop("state")
if user in self._user_cachemap:
state["last_active"] = (
self._user_cachemap[user].get_state()["last_active"]
)
yield self.distributor.fire(
"collect_presencelike_data", user, state
)
if "last_active" in state:
state = dict(state)
state["last_active_ago"] = int(
self.clock.time_msec() - state.pop("last_active")
)
user_state = {
"user_id": user.to_string(),
}
user_state.update(**state)
yield self.federation.send_edu(
destination=destination,
edu_type="m.presence",
content={
"push": [
user_state,
],
}
)
@defer.inlineCallbacks
def incoming_presence(self, origin, content):
"""Handle an incoming m.presence EDU.
For each presence update in the "push" list update our local cache and
notify the appropriate local clients. Only clients that share a room
or are directly subscribed to the presence for a user should be
notified of the update.
For each subscription request in the "poll" list start pushing presence
updates to the remote server.
For each unsubscribe request in the "unpoll" list stop pushing presence
updates to the remote server.
Args:
origin(str): The source of this m.presence EDU.
content(dict): The content of this m.presence EDU.
Returns:
A Deferred.
"""
deferreds = []
for push in content.get("push", []):
@@ -662,8 +911,7 @@ class PresenceHandler(BaseHandler):
" | %d interested local observers %r", len(observers), observers
)
rm_handler = self.homeserver.get_handlers().room_member_handler
room_ids = yield rm_handler.get_rooms_for_user(user)
room_ids = yield self.get_joined_rooms_for_user(user)
if room_ids:
logger.debug(" | %d interested room IDs %r", len(room_ids), room_ids)
@@ -682,24 +930,35 @@ class PresenceHandler(BaseHandler):
self.clock.time_msec() - state.pop("last_active_ago")
)
statuscache = self._get_or_make_usercache(user)
self._user_cachemap_latest_serial += 1
statuscache.update(state, serial=self._user_cachemap_latest_serial)
yield self.update_presence_cache(user, state, room_ids=room_ids)
if not observers and not room_ids:
logger.debug(" | no interested observers or room IDs")
continue
self.push_update_to_clients(
observed_user=user,
users_to_push=observers,
room_ids=room_ids,
statuscache=statuscache,
users_to_push=observers, room_ids=room_ids
)
user_id = user.to_string()
if state["presence"] == PresenceState.OFFLINE:
self._remote_offline_serials.insert(
0,
(self._user_cachemap_latest_serial, set([user_id]))
)
while len(self._remote_offline_serials) > MAX_OFFLINE_SERIALS:
self._remote_offline_serials.pop() # remove the oldest
del self._user_cachemap[user]
else:
# Remove the user from remote_offline_serials now that they're
# no longer offline
for idx, elem in enumerate(self._remote_offline_serials):
(_, user_ids) = elem
user_ids.discard(user_id)
if not user_ids:
self._remote_offline_serials.pop(idx)
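The block above maintains _remote_offline_serials as a newest-first list of (serial, user_ids) pairs capped at MAX_OFFLINE_SERIALS, so offline notifications survive only a bounded window. A standalone sketch of the same bookkeeping; it prunes emptied entries after the loop rather than popping mid-iteration, which the version above does at the risk of skipping entries:

MAX_OFFLINE_SERIALS = 1000
# Newest-first list of (serial, set_of_user_ids) pairs.
remote_offline_serials = []

def mark_offline(serial, user_id):
    remote_offline_serials.insert(0, (serial, set([user_id])))
    while len(remote_offline_serials) > MAX_OFFLINE_SERIALS:
        remote_offline_serials.pop()  # drop the oldest revision

def mark_online(user_id):
    # Discard the user everywhere, then prune entries left empty.
    for _, user_ids in remote_offline_serials:
        user_ids.discard(user_id)
    remote_offline_serials[:] = [
        (serial, user_ids)
        for serial, user_ids in remote_offline_serials
        if user_ids
    ]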
for poll in content.get("poll", []):
user = UserID.from_string(poll)
@@ -728,13 +987,58 @@ class PresenceHandler(BaseHandler):
if not self._remote_sendmap[user]:
del self._remote_sendmap[user]
with PreserveLoggingContext():
yield defer.DeferredList(deferreds, consumeErrors=True)
@defer.inlineCallbacks
def update_presence_cache(self, user, state={}, room_ids=None,
add_to_cache=True):
"""Update the presence cache for a user with a new state and bump the
serial to the latest value.
Args:
user(UserID): The user being updated
state(dict): The presence state being updated
room_ids(None or list of str): A list of room_ids to update. If
room_ids is None then fetch the list of room_ids the user is
joined to.
add_to_cache: Whether to add an entry to the presence cache if the
user isn't already in the cache.
Returns:
A Deferred UserPresenceCache for the user being updated.
"""
if room_ids is None:
room_ids = yield self.get_joined_rooms_for_user(user)
for room_id in room_ids:
self._room_serials[room_id] = self._user_cachemap_latest_serial
if add_to_cache:
statuscache = self._get_or_make_usercache(user)
else:
statuscache = self._get_or_offline_usercache(user)
statuscache.update(state, serial=self._user_cachemap_latest_serial)
defer.returnValue(statuscache)
@defer.inlineCallbacks
def push_update_to_local_and_remote(self, observed_user, statuscache,
users_to_push=[], room_ids=[],
remote_domains=[]):
"""Notify local clients and remote servers of a change in the presence
of a user.
Args:
observed_user(UserID): The user to push the presence state for.
statuscache(UserPresenceCache): The cache for the presence state to
push.
users_to_push([UserID]): A list of local and remote users to
notify.
room_ids([str]): Notify the local and remote occupants of these
rooms.
remote_domains([str]): A list of remote servers to notify in
addition to those implied by the users_to_push and the
room_ids.
Returns:
A Deferred.
"""
localusers, remoteusers = partitionbool(
users_to_push,
@@ -744,10 +1048,7 @@ class PresenceHandler(BaseHandler):
localusers = set(localusers)
self.push_update_to_clients(
observed_user=observed_user,
users_to_push=localusers,
room_ids=room_ids,
statuscache=statuscache,
users_to_push=localusers, room_ids=room_ids
)
remote_domains = set(remote_domains)
@@ -772,75 +1073,132 @@ class PresenceHandler(BaseHandler):
defer.returnValue((localusers, remote_domains))
def push_update_to_clients(self, observed_user, users_to_push=[],
room_ids=[], statuscache=None):
self.notifier.on_new_user_event(
def push_update_to_clients(self, users_to_push=[], room_ids=[]):
"""Notify clients of a new presence event.
Args:
users_to_push([UserID]): List of users to notify.
room_ids([str]): List of room_ids to notify.
"""
with PreserveLoggingContext():
self.notifier.on_new_event(
"presence_key",
self._user_cachemap_latest_serial,
users_to_push,
room_ids,
)
@defer.inlineCallbacks
def _push_presence_remote(self, user, destination, state=None):
"""Push a user's presence to a remote server. If a presence state event
that event is sent. Otherwise a new state event is constructed from the
stored presence state.
The last_active is replaced with last_active_ago in case the wallclock
time on the remote server is different to the time on this server.
Sends an EDU to the remote server with the current presence state.
Args:
user(UserID): The user to push the presence state for.
destination(str): The remote server to send state to.
state(dict): The state to push, or None to use the current stored
state.
Returns:
A Deferred.
"""
if state is None:
state = yield self.store.get_presence_state(user.localpart)
del state["mtime"]
state["presence"] = state.pop("state")
if user in self._user_cachemap:
state["last_active"] = (
self._user_cachemap[user].get_state()["last_active"]
)
yield self.distributor.fire(
"collect_presencelike_data", user, state
)
if "last_active" in state:
state = dict(state)
state["last_active_ago"] = int(
self.clock.time_msec() - state.pop("last_active")
)
user_state = {"user_id": user.to_string(), }
user_state.update(state)
yield self.federation.send_edu(
destination=destination,
edu_type="m.presence",
content={"push": [user_state, ], }
)
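Reduced to a pure function over the stored state dict, the wire-format massaging above amounts to the following (a condensed sketch, not the handler's exact code path):

def presence_edu_content(user_id, state, now_ms):
    # last_active is an absolute local timestamp; the wire carries
    # last_active_ago instead so remote wallclock skew is irrelevant.
    state = dict(state)
    if "last_active" in state:
        state["last_active_ago"] = int(now_ms - state.pop("last_active"))
    user_state = {"user_id": user_id}
    user_state.update(state)
    return {"push": [user_state]}

# presence_edu_content("@a:hs", {"presence": "online", "last_active": 90}, 100)
# == {"push": [{"user_id": "@a:hs", "presence": "online", "last_active_ago": 10}]}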
class PresenceEventSource(object):
def __init__(self, hs):
self.hs = hs
self.clock = hs.get_clock()
@defer.inlineCallbacks
def is_visible(self, observer_user, observed_user):
if observer_user == observed_user:
defer.returnValue(True)
presence = self.hs.get_handlers().presence_handler
if (yield presence.store.user_rooms_intersect(
[u.to_string() for u in observer_user, observed_user])):
defer.returnValue(True)
if self.hs.is_mine(observed_user):
pushmap = presence._local_pushmap
defer.returnValue(
observed_user.localpart in pushmap and
observer_user in pushmap[observed_user.localpart]
)
else:
recvmap = presence._remote_recvmap
defer.returnValue(
observed_user in recvmap and
observer_user in recvmap[observed_user]
)
@defer.inlineCallbacks
@log_function
def get_new_events_for_user(self, user, from_key, limit):
from_key = int(from_key)
observer_user = user
presence = self.hs.get_handlers().presence_handler
cachemap = presence._user_cachemap
max_serial = presence._user_cachemap_latest_serial
clock = self.clock
latest_serial = 0
user_ids_to_check = {user}
presence_list = yield presence.store.get_presence_list(
user.localpart, accepted=True
)
if presence_list is not None:
user_ids_to_check |= set(
UserID.from_string(p["observed_user_id"]) for p in presence_list
)
room_ids = yield presence.get_joined_rooms_for_user(user)
for room_id in set(room_ids) & set(presence._room_serials):
if presence._room_serials[room_id] > from_key:
joined = yield presence.get_joined_users_for_room_id(room_id)
user_ids_to_check |= set(joined)
updates = []
# TODO(paul): use a DeferredList ? How to limit concurrency.
for observed_user in cachemap.keys():
for observed_user in user_ids_to_check & set(cachemap):
cached = cachemap[observed_user]
if cached.serial <= from_key:
if cached.serial <= from_key or cached.serial > max_serial:
continue
if (yield self.is_visible(observer_user, observed_user)):
updates.append((observed_user, cached))
latest_serial = max(cached.serial, latest_serial)
updates.append(cached.make_event(user=observed_user, clock=clock))
# TODO(paul): limit
for serial, user_ids in presence._remote_offline_serials:
if serial <= from_key:
break
if serial > max_serial:
continue
latest_serial = max(latest_serial, serial)
for u in user_ids:
updates.append({
"type": "m.presence",
"content": {"user_id": u, "presence": PresenceState.OFFLINE},
})
# TODO(paul): For the v2 API we want to tell the client their from_key
# is too old if we fell off the end of the _remote_offline_serials
# list, and get them to invalidate+resync. In v1 we have no such
# concept so this is a best-effort result.
if updates:
clock = self.clock
latest_serial = max([x[1].serial for x in updates])
data = [x[1].make_event(user=x[0], clock=clock) for x in updates]
defer.returnValue((data, latest_serial))
defer.returnValue((updates, latest_serial))
else:
defer.returnValue(([], presence._user_cachemap_latest_serial))
@@ -852,8 +1210,6 @@ class PresenceEventSource(object):
def get_pagination_rows(self, user, pagination_config, key):
# TODO (erikj): Does this make sense? Ordering?
observer_user = user
from_key = int(pagination_config.from_key)
if pagination_config.to_key:
@@ -864,13 +1220,25 @@ class PresenceEventSource(object):
presence = self.hs.get_handlers().presence_handler
cachemap = presence._user_cachemap
user_ids_to_check = {user}
presence_list = yield presence.store.get_presence_list(
user.localpart, accepted=True
)
if presence_list is not None:
user_ids_to_check |= set(
UserID.from_string(p["observed_user_id"]) for p in presence_list
)
room_ids = yield presence.get_joined_rooms_for_user(user)
for room_id in set(room_ids) & set(presence._room_serials):
if presence._room_serials[room_id] >= from_key:
joined = yield presence.get_joined_users_for_room_id(room_id)
user_ids_to_check |= set(joined)
updates = []
# TODO(paul): use a DeferredList ? How to limit concurrency.
for observed_user in cachemap.keys():
for observed_user in user_ids_to_check & set(cachemap):
if not (to_key < cachemap[observed_user].serial <= from_key):
continue
if (yield self.is_visible(observer_user, observed_user)):
updates.append((observed_user, cachemap[observed_user]))
# TODO(paul): limit

View File

@@ -17,8 +17,8 @@ from twisted.internet import defer
from synapse.api.errors import SynapseError, AuthError, CodeMessageException
from synapse.api.constants import EventTypes, Membership
from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import UserID
from synapse.util import unwrapFirstError
from ._base import BaseHandler
@@ -88,6 +88,9 @@ class ProfileHandler(BaseHandler):
if target_user != auth_user:
raise AuthError(400, "Cannot set another user's displayname")
if new_displayname == '':
new_displayname = None
yield self.store.set_profile_displayname(
target_user.localpart, new_displayname
)
@@ -154,14 +157,13 @@ class ProfileHandler(BaseHandler):
if not self.hs.is_mine(user):
defer.returnValue(None)
with PreserveLoggingContext():
(displayname, avatar_url) = yield defer.gatherResults(
[
self.store.get_profile_displayname(user.localpart),
self.store.get_profile_avatar_url(user.localpart),
],
consumeErrors=True
)
).addErrback(unwrapFirstError)
state["displayname"] = displayname
state["avatar_url"] = avatar_url
@@ -197,9 +199,8 @@ class ProfileHandler(BaseHandler):
self.ratelimit(user.to_string())
joins = yield self.store.get_rooms_for_user_where_membership_is(
joins = yield self.store.get_rooms_for_user(
user.to_string(),
[Membership.JOIN],
)
for j in joins:
@@ -212,6 +213,7 @@ class ProfileHandler(BaseHandler):
)
msg_handler = self.hs.get_handlers().message_handler
try:
yield msg_handler.create_and_send_event({
"type": EventTypes.Member,
"room_id": j.room_id,
@@ -219,3 +221,8 @@ class ProfileHandler(BaseHandler):
"content": content,
"sender": user.to_string()
}, ratelimit=False)
except Exception as e:
logger.warn(
"Failed to update join event for room %s - %s",
j.room_id, str(e.message)
)

View File

@@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import BaseHandler
from twisted.internet import defer
from synapse.util.logcontext import PreserveLoggingContext
import logging
logger = logging.getLogger(__name__)
class ReceiptsHandler(BaseHandler):
def __init__(self, hs):
super(ReceiptsHandler, self).__init__(hs)
self.hs = hs
self.federation = hs.get_replication_layer()
self.federation.register_edu_handler(
"m.receipt", self._received_remote_receipt
)
self.clock = self.hs.get_clock()
self._receipt_cache = None
@defer.inlineCallbacks
def received_client_receipt(self, room_id, receipt_type, user_id,
event_id):
"""Called when a client tells us a local user has read up to the given
event_id in the room.
"""
receipt = {
"room_id": room_id,
"receipt_type": receipt_type,
"user_id": user_id,
"event_ids": [event_id],
"data": {
"ts": int(self.clock.time_msec()),
}
}
is_new = yield self._handle_new_receipts([receipt])
if is_new:
self._push_remotes([receipt])
@defer.inlineCallbacks
def _received_remote_receipt(self, origin, content):
"""Called when we receive an EDU of type m.receipt from a remote HS.
"""
receipts = [
{
"room_id": room_id,
"receipt_type": receipt_type,
"user_id": user_id,
"event_ids": user_values["event_ids"],
"data": user_values.get("data", {}),
}
for room_id, room_values in content.items()
for receipt_type, users in room_values.items()
for user_id, user_values in users.items()
]
yield self._handle_new_receipts(receipts)
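The triple comprehension above flattens the EDU's room -> receipt-type -> user nesting into one receipt dict per user. A worked example with a hypothetical payload (ids invented for illustration):

content = {
    "!room:remote.hs": {
        "m.read": {
            "@alice:remote.hs": {
                "event_ids": ["$143912:remote.hs"],
                "data": {"ts": 1439000000000},
            },
        },
    },
}

receipts = [
    {
        "room_id": room_id,
        "receipt_type": receipt_type,
        "user_id": user_id,
        "event_ids": user_values["event_ids"],
        "data": user_values.get("data", {}),
    }
    for room_id, room_values in content.items()
    for receipt_type, users in room_values.items()
    for user_id, user_values in users.items()
]

assert receipts == [{
    "room_id": "!room:remote.hs",
    "receipt_type": "m.read",
    "user_id": "@alice:remote.hs",
    "event_ids": ["$143912:remote.hs"],
    "data": {"ts": 1439000000000},
}]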
@defer.inlineCallbacks
def _handle_new_receipts(self, receipts):
"""Takes a list of receipts, stores them and informs the notifier.
"""
for receipt in receipts:
room_id = receipt["room_id"]
receipt_type = receipt["receipt_type"]
user_id = receipt["user_id"]
event_ids = receipt["event_ids"]
data = receipt["data"]
res = yield self.store.insert_receipt(
room_id, receipt_type, user_id, event_ids, data
)
if not res:
# res will be None if this read receipt is 'old'
defer.returnValue(False)
stream_id, max_persisted_id = res
with PreserveLoggingContext():
self.notifier.on_new_event(
"receipt_key", max_persisted_id, rooms=[room_id]
)
defer.returnValue(True)
@defer.inlineCallbacks
def _push_remotes(self, receipts):
"""Given a list of receipts, works out which remote servers should be
poked and pokes them.
"""
# TODO: Some of this stuff should be coalesced.
for receipt in receipts:
room_id = receipt["room_id"]
receipt_type = receipt["receipt_type"]
user_id = receipt["user_id"]
event_ids = receipt["event_ids"]
data = receipt["data"]
remotedomains = set()
rm_handler = self.hs.get_handlers().room_member_handler
yield rm_handler.fetch_room_distributions_into(
room_id, localusers=None, remotedomains=remotedomains
)
logger.debug("Sending receipt to: %r", remotedomains)
for domain in remotedomains:
self.federation.send_edu(
destination=domain,
edu_type="m.receipt",
content={
room_id: {
receipt_type: {
user_id: {
"event_ids": event_ids,
"data": data,
}
}
},
},
)
@defer.inlineCallbacks
def get_receipts_for_room(self, room_id, to_key):
"""Gets all receipts for a room, upto the given key.
"""
result = yield self.store.get_linearized_receipts_for_room(
room_id,
to_key=to_key,
)
if not result:
defer.returnValue([])
event = {
"type": "m.receipt",
"room_id": room_id,
"content": result,
}
defer.returnValue([event])
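For clients, each room's receipts collapse into a single ephemeral m.receipt event. The stored mapping isn't shown in this hunk; the shape below assumes the usual event_id -> receipt_type -> user_id nesting (ids invented for illustration):

receipt_event = {
    "type": "m.receipt",
    "room_id": "!room:example.com",
    "content": {
        "$143912:example.com": {
            "m.read": {
                "@alice:example.com": {"ts": 1439000000000},
            },
        },
    },
}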
class ReceiptEventSource(object):
def __init__(self, hs):
self.store = hs.get_datastore()
@defer.inlineCallbacks
def get_new_events_for_user(self, user, from_key, limit):
from_key = int(from_key)
to_key = yield self.get_current_key()
if from_key == to_key:
defer.returnValue(([], to_key))
rooms = yield self.store.get_rooms_for_user(user.to_string())
rooms = [room.room_id for room in rooms]
events = yield self.store.get_linearized_receipts_for_rooms(
rooms,
from_key=from_key,
to_key=to_key,
)
defer.returnValue((events, to_key))
def get_current_key(self, direction='f'):
return self.store.get_max_receipt_stream_id()
@defer.inlineCallbacks
def get_pagination_rows(self, user, config, key):
to_key = int(config.from_key)
if config.to_key:
from_key = int(config.to_key)
else:
from_key = None
rooms = yield self.store.get_rooms_for_user(user.to_string())
rooms = [room.room_id for room in rooms]
events = yield self.store.get_linearized_receipts_for_rooms(
rooms,
from_key=from_key,
to_key=to_key,
)
defer.returnValue((events, to_key))

View File

@@ -18,19 +18,17 @@ from twisted.internet import defer
from synapse.types import UserID
from synapse.api.errors import (
AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError,
CodeMessageException
AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
)
from ._base import BaseHandler
import synapse.util.stringutils as stringutils
from synapse.util.async import run_on_reactor
from synapse.http.client import SimpleHttpClient
from synapse.http.client import CaptchaServerHttpClient
import base64
import bcrypt
import json
import logging
import urllib
logger = logging.getLogger(__name__)
@@ -43,6 +41,30 @@ class RegistrationHandler(BaseHandler):
self.distributor = hs.get_distributor()
self.distributor.declare("registered_user")
@defer.inlineCallbacks
def check_username(self, localpart):
yield run_on_reactor()
if urllib.quote(localpart) != localpart:
raise SynapseError(
400,
"User ID must only contain characters which do not"
" require URL encoding."
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
u = yield self.store.get_user_by_id(user_id)
if u:
raise SynapseError(
400,
"User ID already taken.",
errcode=Codes.USER_IN_USE,
)
@defer.inlineCallbacks
def register(self, localpart=None, password=None):
"""Registers a new client on the server.
@@ -51,7 +73,8 @@ class RegistrationHandler(BaseHandler):
localpart : The local part of the user ID to register. If None,
one will be randomly generated.
password (str) : The password to assign to this user so they can
login again.
login again. This can be None which means they cannot login again
via a password (e.g. the user is an application service user).
Returns:
A tuple of (user_id, access_token).
Raises:
@@ -63,12 +86,12 @@ class RegistrationHandler(BaseHandler):
password_hash = bcrypt.hashpw(password, bcrypt.gensalt())
if localpart:
yield self.check_username(localpart)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
token = self._generate_token(user_id)
token = self.generate_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
@@ -88,7 +111,7 @@ class RegistrationHandler(BaseHandler):
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
token = self._generate_token(user_id)
token = self.generate_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
@@ -138,7 +161,7 @@ class RegistrationHandler(BaseHandler):
400, "Invalid user localpart for this application service.",
errcode=Codes.EXCLUSIVE
)
token = self._generate_token(user_id)
token = self.generate_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
@@ -149,7 +172,11 @@ class RegistrationHandler(BaseHandler):
@defer.inlineCallbacks
def check_recaptcha(self, ip, private_key, challenge, response):
"""Checks a recaptcha is correct."""
"""
Checks a recaptcha is correct.
Used only by c/s api v1
"""
captcha_response = yield self._validate_captcha(
ip,
@@ -166,15 +193,49 @@ class RegistrationHandler(BaseHandler):
else:
logger.info("Valid captcha entered from %s", ip)
@defer.inlineCallbacks
def register_saml2(self, localpart):
"""
Registers the given localpart for a user authenticated via SAML2-based auth.
"""
if urllib.quote(localpart) != localpart:
raise SynapseError(
400,
"User ID must only contain characters which do not"
" require URL encoding."
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
token = self.generate_token(user_id)
try:
yield self.store.register(
user_id=user_id,
token=token,
password_hash=None
)
yield self.distributor.fire("registered_user", user)
except Exception, e:
yield self.store.add_access_token_to_user(user_id, token)
# Ignore Registration errors
logger.exception(e)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
def register_email(self, threepidCreds):
"""Registers emails with an identity server."""
"""
Registers emails with an identity server.
Used only by c/s api v1
"""
for c in threepidCreds:
logger.info("validating theeepidcred sid %s on id server %s",
c['sid'], c['idServer'])
try:
threepid = yield self._threepid_from_creds(c)
identity_handler = self.hs.get_handlers().identity_handler
threepid = yield identity_handler.threepid_from_creds(c)
except:
logger.exception("Couldn't validate 3pid")
raise RegistrationError(400, "Couldn't validate 3pid")
@@ -186,12 +247,16 @@ class RegistrationHandler(BaseHandler):
@defer.inlineCallbacks
def bind_emails(self, user_id, threepidCreds):
"""Links emails with a user ID and informs an identity server."""
"""Links emails with a user ID and informs an identity server.
Used only by c/s api v1
"""
# Now we have a matrix ID, bind it to the threepids we were given
for c in threepidCreds:
identity_handler = self.hs.get_handlers().identity_handler
# XXX: This should be a deferred list, shouldn't it?
yield self._bind_threepid(c, user_id)
yield identity_handler.bind_threepid(c, user_id)
@defer.inlineCallbacks
def check_user_id_is_valid(self, user_id):
@@ -201,13 +266,14 @@ class RegistrationHandler(BaseHandler):
interested_services = [
s for s in services if s.is_interested_in_user(user_id)
]
if len(interested_services) > 0:
for service in interested_services:
if service.is_exclusive_user(user_id):
raise SynapseError(
400, "This user ID is reserved by an application service.",
errcode=Codes.EXCLUSIVE
)
def _generate_token(self, user_id):
def generate_token(self, user_id):
# urlsafe variant uses _ and - so use . as the separator and replace
# all =s with .s so http clients don't quote =s when it is used as
# query params.
@@ -217,62 +283,12 @@ class RegistrationHandler(BaseHandler):
def _generate_user_id(self):
return "-" + stringutils.random_string(18)
@defer.inlineCallbacks
def _threepid_from_creds(self, creds):
# TODO: get this from the homeserver rather than creating a new one for
# each request
http_client = SimpleHttpClient(self.hs)
# XXX: make this configurable!
trustedIdServers = ['matrix.org:8090', 'matrix.org']
if not creds['idServer'] in trustedIdServers:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', creds['idServer'])
defer.returnValue(None)
data = {}
try:
data = yield http_client.get_json(
# XXX: This should be HTTPS
"http://%s%s" % (
creds['idServer'],
"/_matrix/identity/api/v1/3pid/getValidated3pid"
),
{'sid': creds['sid'], 'clientSecret': creds['clientSecret']}
)
except CodeMessageException as e:
data = json.loads(e.msg)
if 'medium' in data:
defer.returnValue(data)
defer.returnValue(None)
@defer.inlineCallbacks
def _bind_threepid(self, creds, mxid):
yield
logger.debug("binding threepid")
http_client = SimpleHttpClient(self.hs)
data = None
try:
data = yield http_client.post_urlencoded_get_json(
# XXX: Change when ID servers are all HTTPS
"http://%s%s" % (
creds['idServer'], "/_matrix/identity/api/v1/3pid/bind"
),
{
'sid': creds['sid'],
'clientSecret': creds['clientSecret'],
'mxid': mxid,
}
)
logger.debug("bound threepid")
except CodeMessageException as e:
data = json.loads(e.msg)
defer.returnValue(data)
@defer.inlineCallbacks
def _validate_captcha(self, ip_addr, private_key, challenge, response):
"""Validates the captcha provided.
Used only by c/s api v1
Returns:
dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.
@@ -290,6 +306,9 @@ class RegistrationHandler(BaseHandler):
@defer.inlineCallbacks
def _submit_captcha(self, ip_addr, private_key, challenge, response):
"""
Used only by c/s api v1
"""
# TODO: get this from the homeserver rather than creating a new one for
# each request
client = CaptchaServerHttpClient(self.hs)

View File

@@ -19,19 +19,36 @@ from twisted.internet import defer
from ._base import BaseHandler
from synapse.types import UserID, RoomAlias, RoomID
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.constants import (
EventTypes, Membership, JoinRules, RoomCreationPreset,
)
from synapse.api.errors import StoreError, SynapseError
from synapse.util import stringutils
from synapse.util import stringutils, unwrapFirstError
from synapse.util.async import run_on_reactor
from synapse.events.utils import serialize_event
from collections import OrderedDict
import logging
import string
logger = logging.getLogger(__name__)
class RoomCreationHandler(BaseHandler):
PRESETS_DICT = {
RoomCreationPreset.PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": "invited",
"original_invitees_have_ops": False,
},
RoomCreationPreset.PUBLIC_CHAT: {
"join_rules": JoinRules.PUBLIC,
"history_visibility": "shared",
"original_invitees_have_ops": False,
},
}
@defer.inlineCallbacks
def create_room(self, user_id, room_id, config):
""" Creates a new room.
@@ -50,6 +67,10 @@ class RoomCreationHandler(BaseHandler):
self.ratelimit(user_id)
if "room_alias_name" in config:
for wchar in string.whitespace:
if wchar in config["room_alias_name"]:
raise SynapseError(400, "Invalid characters in room alias")
room_alias = RoomAlias.create(
config["room_alias_name"],
self.hs.hostname,
@@ -116,15 +137,31 @@ class RoomCreationHandler(BaseHandler):
servers=[self.hs.hostname],
)
preset_config = config.get(
"preset",
RoomCreationPreset.PUBLIC_CHAT
if is_public
else RoomCreationPreset.PRIVATE_CHAT
)
raw_initial_state = config.get("initial_state", [])
initial_state = OrderedDict()
for val in raw_initial_state:
initial_state[(val["type"], val.get("state_key", ""))] = val["content"]
user = UserID.from_string(user_id)
creation_events = self._create_events_for_new_room(
user, room_id, is_public=is_public
user, room_id,
preset_config=preset_config,
invite_list=invite_list,
initial_state=initial_state,
)
msg_handler = self.hs.get_handlers().message_handler
for event in creation_events:
yield msg_handler.create_and_send_event(event)
yield msg_handler.create_and_send_event(event, ratelimit=False)
if "name" in config:
name = config["name"]
@@ -134,7 +171,7 @@ class RoomCreationHandler(BaseHandler):
"sender": user_id,
"state_key": "",
"content": {"name": name},
})
}, ratelimit=False)
if "topic" in config:
topic = config["topic"]
@@ -144,7 +181,7 @@ class RoomCreationHandler(BaseHandler):
"sender": user_id,
"state_key": "",
"content": {"topic": topic},
})
}, ratelimit=False)
for invitee in invite_list:
yield msg_handler.create_and_send_event({
@@ -153,7 +190,7 @@ class RoomCreationHandler(BaseHandler):
"room_id": room_id,
"sender": user_id,
"content": {"membership": Membership.INVITE},
})
}, ratelimit=False)
result = {"room_id": room_id}
@@ -165,7 +202,10 @@ class RoomCreationHandler(BaseHandler):
defer.returnValue(result)
def _create_events_for_new_room(self, creator, room_id, is_public=False):
def _create_events_for_new_room(self, creator, room_id, preset_config,
invite_list, initial_state):
config = RoomCreationHandler.PRESETS_DICT[preset_config]
creator_id = creator.to_string()
event_keys = {
@@ -198,9 +238,10 @@ class RoomCreationHandler(BaseHandler):
},
)
power_levels_event = create(
etype=EventTypes.PowerLevels,
content={
returned_events = [creation_event, join_event]
if (EventTypes.PowerLevels, '') not in initial_state:
power_level_content = {
"users": {
creator.to_string(): 100,
},
@@ -208,27 +249,51 @@ class RoomCreationHandler(BaseHandler):
"events": {
EventTypes.Name: 100,
EventTypes.PowerLevels: 100,
EventTypes.RoomHistoryVisibility: 100,
},
"events_default": 0,
"state_default": 50,
"ban": 50,
"kick": 50,
"redact": 50
},
"redact": 50,
"invite": 0,
}
if config["original_invitees_have_ops"]:
for invitee in invite_list:
power_level_content["users"][invitee] = 100
power_levels_event = create(
etype=EventTypes.PowerLevels,
content=power_level_content,
)
join_rule = JoinRules.PUBLIC if is_public else JoinRules.INVITE
returned_events.append(power_levels_event)
if (EventTypes.JoinRules, '') not in initial_state:
join_rules_event = create(
etype=EventTypes.JoinRules,
content={"join_rule": join_rule},
content={"join_rule": config["join_rules"]},
)
return [
creation_event,
join_event,
power_levels_event,
join_rules_event,
]
returned_events.append(join_rules_event)
if (EventTypes.RoomHistoryVisibility, '') not in initial_state:
history_event = create(
etype=EventTypes.RoomHistoryVisibility,
content={"history_visibility": config["history_visibility"]}
)
returned_events.append(history_event)
for (etype, state_key), content in initial_state.items():
returned_events.append(create(
etype=etype,
state_key=state_key,
content=content,
))
return returned_events
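The rewritten _create_events_for_new_room gives client-supplied initial_state precedence over every default the server would otherwise synthesize. Condensed to a sketch over plain (type, state_key) -> content dicts:

def events_for_new_room(default_state, initial_state):
    # default_state / initial_state: {(etype, state_key): content}.
    # A default is only emitted when the client didn't supply that
    # (type, state_key) pair; all client state is appended afterwards,
    # mirroring the loop above.
    events = [
        {"type": etype, "state_key": key, "content": content}
        for (etype, key), content in default_state.items()
        if (etype, key) not in initial_state
    ]
    events.extend(
        {"type": etype, "state_key": key, "content": content}
        for (etype, key), content in initial_state.items()
    )
    return events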
class RoomMemberHandler(BaseHandler):
@@ -310,25 +375,6 @@ class RoomMemberHandler(BaseHandler):
# paginating
defer.returnValue(chunk_data)
@defer.inlineCallbacks
def get_room_member(self, room_id, member_user_id, auth_user_id):
"""Retrieve a room member from a room.
Args:
room_id : The room the member is in.
member_user_id : The member's user ID
auth_user_id : The user ID of the user making this request.
Returns:
The room member, or None if this member does not exist.
Raises:
SynapseError if something goes wrong.
"""
yield self.auth.check_joined_room(room_id, auth_user_id)
member = yield self.store.get_room_member(user_id=member_user_id,
room_id=room_id)
defer.returnValue(member)
@defer.inlineCallbacks
def change_membership(self, event, context, do_auth=True):
""" Change the membership status of a user in a room.
@@ -507,11 +553,18 @@ class RoomMemberHandler(BaseHandler):
defer.returnValue((is_remote_invite_join, room_host))
@defer.inlineCallbacks
def get_rooms_for_user(self, user, membership_list=[Membership.JOIN]):
def get_joined_rooms_for_user(self, user):
"""Returns a list of roomids that the user has any of the given
membership states in."""
rooms = yield self.store.get_rooms_for_user_where_membership_is(
user_id=user.to_string(), membership_list=membership_list
app_service = yield self.store.get_app_service_by_user_id(
user.to_string()
)
if app_service:
rooms = yield self.store.get_app_service_rooms(app_service)
else:
rooms = yield self.store.get_rooms_for_user(
user.to_string(),
)
# For some reason the list of events contains duplicates
@@ -540,11 +593,17 @@ class RoomListHandler(BaseHandler):
@defer.inlineCallbacks
def get_public_room_list(self):
chunk = yield self.store.get_rooms(is_public=True)
for room in chunk:
joined_users = yield self.store.get_users_in_room(
room_id=room["room_id"],
)
room["num_joined_members"] = len(joined_users)
results = yield defer.gatherResults(
[
self.store.get_users_in_room(room["room_id"])
for room in chunk
],
consumeErrors=True,
).addErrback(unwrapFirstError)
for i, room in enumerate(chunk):
room["num_joined_members"] = len(results[i])
# FIXME (erikj): START is no longer a valid value
defer.returnValue({"start": "START", "end": "END", "chunk": chunk})
@@ -559,6 +618,17 @@ class RoomEventSource(object):
to_key = yield self.get_current_key()
app_service = yield self.store.get_app_service_by_user_id(
user.to_string()
)
if app_service:
events, end_key = yield self.store.get_appservice_room_stream(
service=app_service,
from_key=from_key,
to_key=to_key,
limit=limit,
)
else:
events, end_key = yield self.store.get_room_events_stream(
user_id=user.to_string(),
from_key=from_key,
@@ -569,8 +639,8 @@ class RoomEventSource(object):
defer.returnValue((events, end_key))
def get_current_key(self):
return self.store.get_room_events_max_id()
def get_current_key(self, direction='f'):
return self.store.get_room_events_max_id(direction)
@defer.inlineCallbacks
def get_pagination_rows(self, user, config, key):

View File

@@ -92,11 +92,13 @@ class SyncHandler(BaseHandler):
result = yield self.current_sync_for_user(sync_config, since_token)
defer.returnValue(result)
else:
def current_sync_callback():
def current_sync_callback(before_token, after_token):
return self.current_sync_for_user(sync_config, since_token)
rm_handler = self.hs.get_handlers().room_member_handler
room_ids = yield rm_handler.get_rooms_for_user(sync_config.user)
room_ids = yield rm_handler.get_joined_rooms_for_user(
sync_config.user
)
result = yield self.notifier.wait_for_events(
sync_config.user, room_ids,
sync_config.filter, timeout, current_sync_callback
@@ -227,7 +229,7 @@ class SyncHandler(BaseHandler):
logger.debug("Typing %r", typing_by_room)
rm_handler = self.hs.get_handlers().room_member_handler
room_ids = yield rm_handler.get_rooms_for_user(sync_config.user)
room_ids = yield rm_handler.get_joined_rooms_for_user(sync_config.user)
# TODO (mjark): Does public mean "published"?
published_rooms = yield self.store.get_rooms(is_public=True)
@@ -290,6 +292,52 @@ class SyncHandler(BaseHandler):
next_batch=now_token,
))
@defer.inlineCallbacks
def _filter_events_for_client(self, user_id, room_id, events):
event_id_to_state = yield self.store.get_state_for_events(
room_id, frozenset(e.event_id for e in events),
types=(
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
)
)
def allowed(event, state):
if event.type == EventTypes.RoomHistoryVisibility:
return True
membership_ev = state.get((EventTypes.Member, user_id), None)
if membership_ev:
membership = membership_ev.membership
else:
membership = Membership.LEAVE
if membership == Membership.JOIN:
return True
history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
if history:
visibility = history.content.get("history_visibility", "shared")
else:
visibility = "shared"
if visibility == "public":
return True
elif visibility == "shared":
return True
elif visibility == "joined":
return membership == Membership.JOIN
elif visibility == "invited":
return membership == Membership.INVITE
return True
defer.returnValue([
event
for event in events
if allowed(event, event_id_to_state[event.event_id])
])
@defer.inlineCallbacks
def load_filtered_recents(self, room_id, sync_config, now_token,
since_token=None):
@@ -311,6 +359,9 @@ class SyncHandler(BaseHandler):
(room_key, _) = keys
end_key = "s" + room_key.split('-')[-1]
loaded_recents = sync_config.filter.filter_room_events(events)
loaded_recents = yield self._filter_events_for_client(
sync_config.user.to_string(), room_id, loaded_recents,
)
loaded_recents.extend(recents)
recents = loaded_recents
if len(events) <= load_limit:

View File

@@ -18,6 +18,7 @@ from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.errors import SynapseError, AuthError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import UserID
import logging
@@ -216,13 +217,17 @@ class TypingNotificationHandler(BaseHandler):
self._latest_room_serial += 1
self._room_serials[room_id] = self._latest_room_serial
self.notifier.on_new_user_event(rooms=[room_id])
with PreserveLoggingContext():
self.notifier.on_new_event(
"typing_key", self._latest_room_serial, rooms=[room_id]
)
class TypingNotificationEventSource(object):
def __init__(self, hs):
self.hs = hs
self._handler = None
self._room_member_handler = None
def handler(self):
# Avoid cyclic dependency in handler setup
@@ -230,6 +235,11 @@ class TypingNotificationEventSource(object):
self._handler = self.hs.get_handlers().typing_notification_handler
return self._handler
def room_member_handler(self):
if not self._room_member_handler:
self._room_member_handler = self.hs.get_handlers().room_member_handler
return self._room_member_handler
def _make_event_for(self, room_id):
typing = self.handler()._room_typing[room_id]
return {
@@ -240,19 +250,25 @@ class TypingNotificationEventSource(object):
},
}
@defer.inlineCallbacks
def get_new_events_for_user(self, user, from_key, limit):
from_key = int(from_key)
handler = self.handler()
joined_room_ids = (
yield self.room_member_handler().get_joined_rooms_for_user(user)
)
events = []
for room_id in handler._room_serials:
if room_id not in joined_room_ids:
continue
if handler._room_serials[room_id] <= from_key:
continue
# TODO: check if user is in room
events.append(self._make_event_for(room_id))
return (events, handler._latest_room_serial)
defer.returnValue((events, handler._latest_room_serial))
def get_current_key(self):
return self.handler()._latest_room_serial
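_make_event_for() is truncated by the hunk boundary above; based on the _room_typing set it reads and the m.typing convention, the event it builds presumably looks like this:

def make_typing_event(room_id, typing_user_ids):
    # typing_user_ids: the handler's _room_typing[room_id] set.
    return {
        "type": "m.typing",
        "room_id": room_id,
        "content": {
            "user_ids": list(typing_user_ids),
        },
    }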

Some files were not shown because too many files have changed in this diff.