mirror of https://github.com/element-hq/synapse.git
synced 2025-12-15 02:00:21 +00:00

Compare commits: v0.13.0...v0.18.4-rc (2171 commits)
| Author | SHA1 | Date |
|---|---|---|
| *(2171 commit rows omitted: only abbreviated SHA1 hashes survived extraction; the author, date, and message columns are empty)* |
.gitignore (vendored): 8 changes
@@ -24,10 +24,10 @@ homeserver*.yaml
 .coverage
 htmlcov

-demo/*.db
-demo/*.log
-demo/*.log.*
-demo/*.pid
+demo/*/*.db
+demo/*/*.log
+demo/*/*.log.*
+demo/*/*.pid
 demo/media_store.*
 demo/etc
AUTHORS.rst: 10 changes
@@ -29,7 +29,7 @@ Matthew Hodgson <matthew at matrix.org>

 Emmanuel Rohee <manu at matrix.org>
 * Supporting iOS clients (testability and fallback registration)

 Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions

@@ -53,4 +53,10 @@ Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

 Florent Violleau <floviolleau at gmail dot com>
-* Add Raspberry Pi installation instructions and general troubleshooting items
+* Add Raspberry Pi installation instructions and general troubleshooting items
+
+Niklas Riekenbrauck <nikriek at gmail dot.com>
+* Add JWT support for registration and login
+
+Christoph Witzany <christoph at web.crofting.com>
+* Add LDAP support for authentication
CHANGES.rst: 668 changes
@@ -1,3 +1,663 @@
Changes in synapse v0.18.4-rc1 (2016-11-14)
===========================================

Changes:

* Various database efficiency improvements (PR #1188, #1192)
* Update default config to blacklist more internal IPs, thanks to Euan Kemp
  (PR #1198)
* Allow specifying duration in minutes in config, thanks to Daniel Dent
  (PR #1625)


Bug fixes:

* Fix media repo to set CORS headers on responses (PR #1190)
* Fix registration to not error on non-ASCII passwords (PR #1191)
* Fix create event code to limit the number of prev_events (PR #1615)
* Fix bug in transaction ID deduplication (PR #1624)

Changes in synapse v0.18.3 (2016-11-08)
=======================================

SECURITY UPDATE

Explicitly require authentication when using LDAP3. This is the default on
versions of ``ldap3`` above 1.0, but some distributions will package an older
version.

If you are using LDAP3 login and have a version of ``ldap3`` older than 1.0 it
is **CRITICAL to upgrade**.
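For illustration only, a minimal sketch of an explicit (non-anonymous) simple
bind using the ``ldap3`` library; the server address, bind DN and password are
placeholder assumptions, and this is not Synapse's own login code::

    # Sketch: perform an explicit simple bind with ldap3 rather than relying
    # on an anonymous default. All connection details below are placeholders.
    from ldap3 import Connection, Server, SIMPLE

    server = Server("ldap.example.com")
    conn = Connection(
        server,
        user="uid=alice,ou=users,dc=example,dc=com",  # placeholder bind DN
        password="secret",                            # placeholder password
        authentication=SIMPLE,  # explicit simple auth, never anonymous
    )
    if not conn.bind():
        raise RuntimeError("LDAP bind failed: %r" % (conn.result,))
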
Changes in synapse v0.18.2 (2016-11-01)
=======================================

No changes since v0.18.2-rc5


Changes in synapse v0.18.2-rc5 (2016-10-28)
===========================================

Bug fixes:

* Fix prometheus process metrics in worker processes (PR #1184)


Changes in synapse v0.18.2-rc4 (2016-10-27)
===========================================

Bug fixes:

* Fix ``user_threepids`` schema delta, which in some instances prevented
  startup after upgrade (PR #1183)

Changes in synapse v0.18.2-rc3 (2016-10-27)
===========================================

Changes:

* Allow clients to supply access tokens as headers (PR #1098; illustrated
  after this section)
* Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
* Make password reset email field case insensitive (PR #1170)
* Reduce redundant database work in email pusher (PR #1174)
* Allow configurable rate limiting per AS (PR #1175)
* Check whether to ratelimit sooner to avoid work (PR #1176)
* Standardise prometheus metrics (PR #1177)


Bug fixes:

* Fix incredibly slow back pagination query (PR #1178)
* Fix infinite typing bug (PR #1179)
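As a hedged illustration of PR #1098, a sketch of making the same authenticated
request with the token in the query string and in an ``Authorization`` header;
the homeserver URL and token are placeholders, and ``requests`` is assumed to
be installed::

    # Sketch: supply the access token either as a query parameter (older
    # style) or as a Bearer Authorization header. Values are placeholders.
    import requests

    HS = "https://matrix.example.com"
    TOKEN = "MDAxabc"  # placeholder access token

    via_param = requests.get(
        HS + "/_matrix/client/r0/devices",
        params={"access_token": TOKEN},
    )
    via_header = requests.get(
        HS + "/_matrix/client/r0/devices",
        headers={"Authorization": "Bearer " + TOKEN},
    )
    assert via_param.status_code == via_header.status_code
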
Changes in synapse v0.18.2-rc2 (2016-10-25)
===========================================

(This release did not include the changes advertised and was identical to RC1)

Changes in synapse v0.18.2-rc1 (2016-10-17)
===========================================

Changes:

* Remove redundant event_auth index (PR #1113)
* Reduce DB hits for replication (PR #1141)
* Implement pluggable password auth (PR #1155)
* Remove rate limiting from app service senders and fix get_or_create_user
  requester, thanks to Patrik Oldsberg (PR #1157)
* window.postmessage for Interactive Auth fallback (PR #1159)
* Use sys.executable instead of hardcoded python, thanks to Pedro Larroy
  (PR #1162; a short sketch follows this section)
* Add config option for adding additional TLS fingerprints (PR #1167)
* User-interactive auth on delete device (PR #1168)


Bug fixes:

* Fix not being allowed to set your own state_key, thanks to Patrik Oldsberg
  (PR #1150)
* Fix interactive auth to return 401 for incorrect password (PR #1160, #1166)
* Fix email push notifs being dropped (PR #1169)
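To illustrate the ``sys.executable`` change, a minimal sketch of spawning a
child process with the same interpreter that is running the current script;
the module name is a placeholder::

    # Sketch: use the running interpreter's own path instead of a hardcoded
    # "python", which may resolve to a different (or missing) interpreter.
    import subprocess
    import sys

    subprocess.check_call([sys.executable, "-m", "some_module"])  # placeholder
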
Changes in synapse v0.18.1 (2016-10-05)
=======================================

No changes since v0.18.1-rc1

Changes in synapse v0.18.1-rc1 (2016-09-30)
===========================================

Features:

* Add total_room_count_estimate to ``/publicRooms`` (PR #1133; a short
  sketch of reading it follows this section)


Changes:

* Time out typing over federation (PR #1140)
* Restructure LDAP authentication (PR #1153)


Bug fixes:

* Fix 3pid invites when server is already in the room (PR #1136)
* Fix upgrading with SQLite taking lots of CPU for a few days
  after upgrade (PR #1144)
* Fix upgrading from very old database versions (PR #1145)
* Fix port script to work with recently added tables (PR #1146)
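A small sketch of reading the new field; the homeserver URL is a placeholder
and ``requests`` is assumed::

    # Sketch: fetch the public room list and print the estimated total.
    import requests

    resp = requests.get(
        "https://matrix.example.com/_matrix/client/r0/publicRooms"
    )
    rooms = resp.json()
    # Absent on servers older than this release.
    print(rooms.get("total_room_count_estimate"))
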
Changes in synapse v0.18.0 (2016-09-19)
=======================================

The release includes major changes to the state storage database schemas, which
significantly reduce database size. Synapse will attempt to upgrade the current
data in the background. Servers with large SQLite databases may experience
degradation of performance while this upgrade is in progress, therefore you may
want to consider migrating to Postgres before upgrading very large SQLite
databases.


Changes:

* Make public room search case insensitive (PR #1127)


Bug fixes:

* Fix and clean up publicRooms pagination (PR #1129)

Changes in synapse v0.18.0-rc1 (2016-09-16)
===========================================

Features:

* Add ``only=highlight`` on ``/notifications`` (PR #1081)
* Add server param to /publicRooms (PR #1082)
* Allow clients to ask for the whole of a single state event (PR #1094)
* Add is_direct param to /createRoom (PR #1108)
* Add pagination support to publicRooms (PR #1121)
* Add very basic filter API to /publicRooms (PR #1126; pagination and
  filtering are sketched together after this section)
* Add basic direct to device messaging support for E2E (PR #1074, #1084,
  #1104, #1111)


Changes:

* Move to storing state_groups_state as deltas, greatly reducing DB size
  (PR #1065)
* Reduce amount of state pulled out of the DB during common requests (PR #1069)
* Allow PDF to be rendered from media repo (PR #1071)
* Reindex state_groups_state after pruning (PR #1085)
* Clobber EDUs in send queue (PR #1095)
* Conform better to the CAS protocol specification (PR #1100)
* Limit how often we ask for keys from dead servers (PR #1114)


Bug fixes:

* Fix /notifications API when used with ``from`` param (PR #1080)
* Fix backfill when we cannot find an event (PR #1107)
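As a hedged sketch of the pagination and filter features above, using the
request shape the Matrix spec later standardised; the URL and search term are
placeholders, and the exact body accepted by this release may differ::

    # Sketch: POST /publicRooms with a page size, a basic search filter,
    # and a pagination token carried between calls. Values are placeholders.
    import requests

    HS = "https://matrix.example.com"
    body = {"limit": 10, "filter": {"generic_search_term": "synapse"}}
    first_page = requests.post(
        HS + "/_matrix/client/r0/publicRooms", json=body
    ).json()

    # Feed next_batch back as "since" to fetch the following page.
    if first_page.get("next_batch"):
        body["since"] = first_page["next_batch"]
        second_page = requests.post(
            HS + "/_matrix/client/r0/publicRooms", json=body
        ).json()
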
Changes in synapse v0.17.3 (2016-09-09)
=======================================

This release fixes a major bug that stopped servers from handling rooms with
over 1000 members.

Changes in synapse v0.17.2 (2016-09-08)
=======================================

This release contains security bug fixes. Please upgrade.

No changes since v0.17.2-rc1

Changes in synapse v0.17.2-rc1 (2016-09-05)
===========================================

Features:

* Start adding store-and-forward direct-to-device messaging (PR #1046, #1050,
  #1062, #1066; a sketch of the eventual API shape follows this section)


Changes:

* Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063,
  #1068)
* Don't notify for online to online presence transitions (PR #1054)
* Occasionally persist unpersisted presence updates (PR #1055)
* Allow application services to have an optional 'url' (PR #1056)
* Clean up old sent transactions from DB (PR #1059)


Bug fixes:

* Fix None check in backfill (PR #1043)
* Fix membership changes to be idempotent (PR #1067)
* Fix bug in get_pdu where it would sometimes return events with an incorrect
  signature
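A hedged sketch of the store-and-forward API in the shape it eventually took
(``PUT /sendToDevice/{eventType}/{txnId}``); the endpoint path, event type,
identifiers and token here are assumptions against the later spec, not
necessarily this release's exact API::

    # Sketch: send one to-device message to a single device of one user.
    # All identifiers and the token below are placeholders.
    import requests

    HS = "https://matrix.example.com"
    payload = {
        "messages": {
            "@alice:example.com": {               # target user (placeholder)
                "ALICEDEVICE": {"body": "hello"}  # device -> event content
            }
        }
    }
    requests.put(
        HS + "/_matrix/client/r0/sendToDevice/m.example.event/txn1",
        headers={"Authorization": "Bearer MDAxabc"},
        json=payload,
    )
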
Changes in synapse v0.17.1 (2016-08-24)
=======================================

Changes:

* Delete old received_transactions rows (PR #1038)
* Pass through user-supplied content in /join/$room_id (PR #1039)


Bug fixes:

* Fix bug with backfill (PR #1040)

Changes in synapse v0.17.1-rc1 (2016-08-22)
===========================================

Features:

* Add notification API (PR #1028)


Changes:

* Don't print stack traces when failing to get remote keys (PR #996)
* Various federation /event/ perf improvements (PR #998)
* Only process one local membership event per room at a time (PR #1005)
* Move default display name push rule (PR #1011, #1023)
* Fix up preview URL API. Add tests. (PR #1015)
* Set ``Content-Security-Policy`` on media repo (PR #1021)
* Make notify_interested_services faster (PR #1022)
* Add usage stats to prometheus monitoring (PR #1037)


Bug fixes:

* Fix token login (PR #993)
* Fix CAS login (PR #994, #995)
* Fix /sync to not clobber status_msg (PR #997)
* Fix redacted state events to include prev_content (PR #1003)
* Fix some bugs in the auth/ldap handler (PR #1007)
* Fix backfill request to limit URI length, so that remotes don't reject the
  requests due to path length limits (PR #1012)
* Fix AS push code to not send duplicate events (PR #1025)

Changes in synapse v0.17.0 (2016-08-08)
=======================================

This release contains significant security bug fixes regarding authenticating
events received over federation. PLEASE UPGRADE.

This release changes the LDAP configuration format in a backwards incompatible
way; see PR #843 for details.


Changes:

* Add federation /version API (PR #990)
* Make psutil dependency optional (PR #992)


Bug fixes:

* Fix URL preview API to exclude HTML comments in description (PR #988)
* Fix error handling of remote joins (PR #991)

Changes in synapse v0.17.0-rc4 (2016-08-05)
===========================================

Changes:

* Change the way we summarize URLs when previewing (PR #973)
* Add new ``/state_ids/`` federation API (PR #979)
* Speed up processing of ``/state/`` response (PR #986)

Bug fixes:

* Fix event persistence when event has already been partially persisted
  (PR #975, #983, #985)
* Fix port script to also copy across backfilled events (PR #982)

Changes in synapse v0.17.0-rc3 (2016-08-02)
===========================================

Changes:

* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
* Add some basic admin API docs (PR #963)


Bug fixes:

* Send the correct host header when fetching keys (PR #941)
* Fix joining a room that has missing auth events (PR #964)
* Fix various push bugs (PR #966, #970)
* Fix adding emails on registration (PR #968)

Changes in synapse v0.17.0-rc2 (2016-08-02)
===========================================

(This release did not include the changes advertised and was identical to RC1)

Changes in synapse v0.17.0-rc1 (2016-07-28)
===========================================

This release changes the LDAP configuration format in a backwards incompatible
way; see PR #843 for details.


Features:

* Add purge_media_cache admin API (PR #902)
* Add deactivate account admin API (PR #903)
* Add optional pepper to password hashing (PR #907, #910 by KentShikama;
  sketched below)
* Add an admin option to shared secret registration (breaks backwards compat)
  (PR #909)
* Add purge local room history API (PR #911, #923, #924)
* Add requestToken endpoints (PR #915)
* Add an /account/deactivate endpoint (PR #921)
* Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
* Add device_id support to /login (PR #929; a sketch follows this section)
* Add device_id support to /v2/register flow. (PR #937, #942)
* Add GET /devices endpoint (PR #939, #944)
* Add GET /device/{deviceId} (PR #943)
* Add update and delete APIs for devices (PR #949)
Changes:
|
||||
|
||||
* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
|
||||
* Linearize some federation endpoints based on (origin, room_id) (PR #879)
|
||||
* Remove the legacy v0 content upload API. (PR #888)
|
||||
* Use similar naming we use in email notifs for push (PR #894)
|
||||
* Optionally include password hash in createUser endpoint (PR #905 by
|
||||
KentShikama)
|
||||
* Use a query that postgresql optimises better for get_events_around (PR #906)
|
||||
* Fall back to 'username' if 'user' is not given for appservice registration.
|
||||
(PR #927 by Half-Shot)
|
||||
* Add metrics for psutil derived memory usage (PR #936)
|
||||
* Record device_id in client_ips (PR #938)
|
||||
* Send the correct host header when fetching keys (PR #941)
|
||||
* Log the hostname the reCAPTCHA was completed on (PR #946)
|
||||
* Make the device id on e2e key upload optional (PR #956)
|
||||
* Add r0.2.0 to the "supported versions" list (PR #960)
|
||||
* Don't include name of room for invites in push (PR #961)
|
||||
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix substitution failure in mail template (PR #887)
|
||||
* Put most recent 20 messages in email notif (PR #892)
|
||||
* Ensure that the guest user is in the database when upgrading accounts
|
||||
(PR #914)
|
||||
* Fix various edge cases in auth handling (PR #919)
|
||||
* Fix 500 ISE when sending alias event without a state_key (PR #925)
|
||||
* Fix bug where we stored rejections in the state_group, persist all
|
||||
rejections (PR #948)
|
||||
* Fix lack of check of if the user is banned when handling 3pid invites
|
||||
(PR #952)
|
||||
* Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
|
||||
|
||||
|
||||
|
||||
Changes in synapse v0.16.1-r1 (2016-07-08)
|
||||
==========================================
|
||||
|
||||
THIS IS A CRITICAL SECURITY UPDATE.
|
||||
|
||||
This fixes a bug which allowed users' accounts to be accessed by unauthorised
|
||||
users.
|
||||
|
||||
Changes in synapse v0.16.1 (2016-06-20)
|
||||
=======================================
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix assorted bugs in ``/preview_url`` (PR #872)
|
||||
* Fix TypeError when setting unicode passwords (PR #873)
|
||||
|
||||
|
||||
Performance improvements:
|
||||
|
||||
* Turn ``use_frozen_events`` off by default (PR #877)
|
||||
* Disable responding with canonical json for federation (PR #878)
|
||||
|
||||
|
||||
Changes in synapse v0.16.1-rc1 (2016-06-15)
|
||||
===========================================
|
||||
|
||||
Features: None
|
||||
|
||||
Changes:
|
||||
|
||||
* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
|
||||
* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
|
||||
* Linearize fetching of gaps on incoming events (PR #871)
|
||||
|
||||
|
||||
Bugs fixes:
|
||||
|
||||
* Fix bug where rooms where marked as published by default (PR #857)
|
||||
* Fix bug where joining room with an event with invalid sender (PR #868)
|
||||
* Fix bug where backfilled events were sent down sync streams (PR #869)
|
||||
* Fix bug where outgoing connections could wedge indefinitely, causing push
|
||||
notifications to be unreliable (PR #870)
|
||||
|
||||
|
||||
Performance improvements:
|
||||
|
||||
* Improve ``/publicRooms`` performance(PR #859)
|
||||
|
||||
|
||||
Changes in synapse v0.16.0 (2016-06-09)
|
||||
=======================================
|
||||
|
||||
NB: As of v0.14 all AS config files must have an ID field.
|
||||
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Don't make rooms published by default (PR #857)
|
||||
|
||||
Changes in synapse v0.16.0-rc2 (2016-06-08)
|
||||
===========================================
|
||||
|
||||
Features:
|
||||
|
||||
* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849)
|
||||
|
||||
Changes:
|
||||
|
||||
* Record metrics about GC (PR #771, #847, #852)
|
||||
* Add metric counter for number of persisted events (PR #841)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix 'From' header in email notifications (PR #843)
|
||||
* Fix presence where timeouts were not being fired for the first 8h after
|
||||
restarts (PR #842)
|
||||
* Fix bug where synapse sent malformed transactions to AS's when retrying
|
||||
transactions (Commits 310197b, 8437906)
|
||||
|
||||
Performance improvements:
|
||||
|
||||
* Remove event fetching from DB threads (PR #835)
|
||||
* Change the way we cache events (PR #836)
|
||||
* Add events to cache when we persist them (PR #840)
|
||||
|
||||
|
||||
Changes in synapse v0.16.0-rc1 (2016-06-03)
|
||||
===========================================
|
||||
|
||||
Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
|
||||
|
||||
Features:
|
||||
|
||||
* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
|
||||
#821)
|
||||
* Add a ``url_preview_ip_range_whitelist`` config param (PR #760)
|
||||
* Add /report endpoint (PR #762)
|
||||
* Add basic ignore user API (PR #763)
|
||||
* Add an openidish mechanism for proving that you own a given user_id (PR #765)
|
||||
* Allow clients to specify a server_name to avoid 'No known servers' (PR #794)
|
||||
* Add secondary_directory_servers option to fetch room list from other servers
|
||||
(PR #808, #813)
|
||||
|
||||
Changes:
|
||||
|
||||
* Report per request metrics for all of the things using request_handler (PR
|
||||
#756)
|
||||
* Correctly handle ``NULL`` password hashes from the database (PR #775)
|
||||
* Allow receipts for events we haven't seen in the db (PR #784)
|
||||
* Make synctl read a cache factor from config file (PR #785)
|
||||
* Increment badge count per missed convo, not per msg (PR #793)
|
||||
* Special case m.room.third_party_invite event auth to match invites (PR #814)
|
||||
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix typo in event_auth servlet path (PR #757)
|
||||
* Fix password reset (PR #758)
|
||||
|
||||
|
||||
Performance improvements:
|
||||
|
||||
* Reduce database inserts when sending transactions (PR #767)
|
||||
* Queue events by room for persistence (PR #768)
|
||||
* Add cache to ``get_user_by_id`` (PR #772)
|
||||
* Add and use ``get_domain_from_id`` (PR #773)
|
||||
* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779)
|
||||
* Remove unused indices (PR #782)
|
||||
* Add caches to ``bulk_get_push_rules*`` (PR #804)
|
||||
* Cache ``get_event_reference_hashes`` (PR #806)
|
||||
* Add ``get_users_with_read_receipts_in_room`` cache (PR #809)
|
||||
* Use state to calculate ``get_users_in_room`` (PR #811)
|
||||
* Load push rules in storage layer so that they get cached (PR #825)
|
||||
* Make ``get_joined_hosts_for_room`` use get_users_in_room (PR #828)
|
||||
* Poke notifier on next reactor tick (PR #829)
|
||||
* Change CacheMetrics to be quicker (PR #830)
|
||||
|
||||
|
||||
Changes in synapse v0.15.0-rc1 (2016-04-26)
|
||||
===========================================
|
||||
|
||||
Features:
|
||||
|
||||
* Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck
|
||||
(PR #671,#687)
|
||||
* Add URL previewing support (PR #688)
|
||||
* Add login support for LDAP, thanks to Christoph Witzany (PR #701)
|
||||
* Add GET endpoint for pushers (PR #716)
|
||||
|
||||
Changes:
|
||||
|
||||
* Never notify for member events (PR #667)
|
||||
* Deduplicate identical ``/sync`` requests (PR #668)
|
||||
* Require user to have left room to forget room (PR #673)
|
||||
* Use DNS cache if within TTL (PR #677)
|
||||
* Let users see their own leave events (PR #699)
|
||||
* Deduplicate membership changes (PR #700)
|
||||
* Increase performance of pusher code (PR #705)
|
||||
* Respond with error status 504 if failed to talk to remote server (PR #731)
|
||||
* Increase search performance on postgres (PR #745)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix bug where disabling all notifications still resulted in push (PR #678)
|
||||
* Fix bug where users couldn't reject remote invites if remote refused (PR #691)
|
||||
* Fix bug where synapse attempted to backfill from itself (PR #693)
|
||||
* Fix bug where profile information was not correctly added when joining remote
|
||||
rooms (PR #703)
|
||||
* Fix bug where register API required incorrect key name for AS registration
|
||||
(PR #727)
|
||||
|
||||
|
||||
Changes in synapse v0.14.0 (2016-03-30)
|
||||
=======================================
|
||||
|
||||
No changes from v0.14.0-rc2
|
||||
|
||||
Changes in synapse v0.14.0-rc2 (2016-03-23)
|
||||
===========================================
|
||||
|
||||
Features:
|
||||
|
||||
* Add published room list API (PR #657)
|
||||
|
||||
Changes:
|
||||
|
||||
* Change various caches to consume less memory (PR #656, #658, #660, #662,
|
||||
#663, #665)
|
||||
* Allow rooms to be published without requiring an alias (PR #664)
|
||||
* Intern common strings in caches to reduce memory footprint (#666)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix reject invites over federation (PR #646)
|
||||
* Fix bug where registration was not idempotent (PR #649)
|
||||
* Update aliases event after deleting aliases (PR #652)
|
||||
* Fix unread notification count, which was sometimes wrong (PR #661)
|
||||
|
||||
Changes in synapse v0.14.0-rc1 (2016-03-14)
|
||||
===========================================
|
||||
|
||||
Features:
|
||||
|
||||
* Add event_id to response to state event PUT (PR #581)
|
||||
* Allow guest users access to messages in rooms they have joined (PR #587)
|
||||
* Add config for what state is included in a room invite (PR #598)
|
||||
* Send the inviter's member event in room invite state (PR #607)
|
||||
* Add error codes for malformed/bad JSON in /login (PR #608)
|
||||
* Add support for changing the actions for default rules (PR #609)
|
||||
* Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 (PR #612)
|
||||
* Add ability for alias creators to delete aliases (PR #614)
|
||||
* Add profile information to invites (PR #624)
|
||||
|
||||
Changes:
|
||||
|
||||
* Enforce user_id exclusivity for AS registrations (PR #572)
|
||||
* Make adding push rules idempotent (PR #587)
|
||||
* Improve presence performance (PR #582, #586)
|
||||
* Change presence semantics for ``last_active_ago`` (PR #582, #586)
|
||||
* Don't allow ``m.room.create`` to be changed (PR #596)
|
||||
* Add 800x600 to default list of valid thumbnail sizes (PR #616)
|
||||
* Always include kicks and bans in full /sync (PR #625)
|
||||
* Send history visibility on boundary changes (PR #626)
|
||||
* Register endpoint now returns a refresh_token (PR #637)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix bug where we returned incorrect state in /sync (PR #573)
|
||||
* Always return a JSON object from push rule API (PR #606)
|
||||
* Fix bug where registering without a user id sometimes failed (PR #610)
|
||||
* Report size of ExpiringCache in cache size metrics (PR #611)
|
||||
* Fix rejection of invites to empty rooms (PR #615)
|
||||
* Fix usage of ``bcrypt`` to not use ``checkpw`` (PR #619)
|
||||
* Pin ``pysaml2`` dependency (PR #634)
|
||||
* Fix bug in ``/sync`` where timeline order was incorrect for backfilled events
|
||||
(PR #635)
|
||||
|
||||
Changes in synapse v0.13.3 (2016-02-11)
|
||||
=======================================
|
||||
|
||||
* Fix bug where ``/sync`` would occasionally return events in the wrong room.
|
||||
|
||||
Changes in synapse v0.13.2 (2016-02-11)
|
||||
=======================================
|
||||
|
||||
* Fix bug where ``/events`` would fail to skip some events if there had been
|
||||
more events than the limit specified since the last request (PR #570)
|
||||
|
||||
Changes in synapse v0.13.1 (2016-02-10)
|
||||
=======================================
|
||||
|
||||
* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
|
||||
pull in the fix for SYWEB-361 so that the default client can display
|
||||
HTML messages again(!)
|
||||
|
||||
Changes in synapse v0.13.0 (2016-02-10)
|
||||
=======================================
|
||||
|
||||
Configuration:

* Add support for changing the bind host of the metrics listener via the
  ``metrics_bind_host`` option.


Changes in synapse v0.9.0-r5 (2015-05-21)
=========================================

See UPGRADE for information about changes to the client server API, including
breaking backwards compatibility with VoIP calls and registration API.

Homeserver:
 * When a user changes their displayname or avatar the server will now update
   all their join states to reflect this.
 * The server now adds "age" key to events to indicate how old they are. This
   is clock independent, so at no point does any server or webclient have to

Changes in synapse 0.2.2 (2014-09-06)
=====================================

Homeserver:
 * When the server returns state events it now also includes the previous
   content.
 * Add support for inviting people when creating a new room.
 * Make the homeserver inform the room via `m.room.aliases` when a new alias

Webclient:
 * Handle `m.room.aliases` events.
 * Asynchronously send messages and show a local echo.
 * Inform the UI when a message failed to send.
 * Only autoscroll on receiving a new message if the user was already at the
   bottom of the screen.
 * Add support for ban/kick reasons.
MANIFEST.in

recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py

recursive-include docs *
recursive-include res *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py

recursive-include synapse/static *.css
recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
recursive-exclude jenkins *.sh

prune demo/etc
README.rst

VoIP. The basics you need to know to get up and running are:

  like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
  you will normally refer to yourself and others using a third party identifier
  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)

The overall architecture is::

the spec in the context of a codebase and let you run your own homeserver and
generally help bootstrap the ecosystem.

In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver. The homeserver stores all their personal chat history and
user account information - much as a mail client connects through to an
IMAP/SMTP server. Just like email, you can either run your own Matrix
homeserver and control and own your own communications and history or use one
hosted by someone else (e.g. matrix.org) - there is no single point of control
or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
etc.

Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
web client demo implemented in AngularJS) and cmdclient (a basic Python

Synapse is the reference python/twisted Matrix homeserver implementation.

System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 2.7
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

Synapse is written in python but some of the libraries it uses are written in
C. So before we can install synapse itself we need a working C compiler and the

Installing prerequisites on Ubuntu or Debian::

    sudo apt-get install build-essential python2.7-dev libffi-dev \
                         python-pip python-setuptools sqlite3 \
                         libssl-dev python-virtualenv libjpeg-dev libxslt1-dev

Installing prerequisites on ArchLinux::

Installing prerequisites on CentOS 7::

    python-virtualenv libffi-devel openssl-devel
    sudo yum groupinstall "Development Tools"

Installing prerequisites on Mac OS X::

    xcode-select --install

Installing prerequisites on Raspbian::

    sudo pip install --upgrade ndg-httpsclient
    sudo pip install --upgrade virtualenv

Installing prerequisites on openSUSE::

    sudo zypper in -t pattern devel_basis
    sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
                   python-devel libffi-devel libopenssl-devel libjpeg62-devel

To install the synapse homeserver run::

    virtualenv -p python2.7 ~/.synapse

In case of problems, please see the _Troubleshooting section below.

Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.

Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
for details.

run (e.g. ``~/.synapse``), and::

    source ./bin/activate
    synctl start

Security Note
=============

Matrix serves raw user generated data in some APIs - specifically the content
repository endpoints:
http://matrix.org/docs/spec/client_server/r0.2.0.html#get-matrix-media-r0-download-servername-mediaid

Whilst we have tried to mitigate against possible XSS attacks (e.g.
https://github.com/matrix-org/synapse/pull/1021) we recommend running
matrix homeservers on a dedicated domain name, to limit any malicious user generated
content served to web browsers via a matrix API from being able to attack webapps hosted
on the same domain. This is particularly true of sharing a matrix webclient and
server on the same domain.

See https://github.com/vector-im/vector-web/issues/1977 and
https://developer.github.com/changes/2014-04-25-user-content-security for more details.

Using PostgreSQL
================

The advantages of Postgres include:

  pointing at the same DB master, as well as enabling DB replication in
  synapse itself.

The only disadvantage is that the code is relatively new as of April 2015 and
may have a few regressions relative to SQLite.

For information on how to install and use PostgreSQL, please see
`docs/postgres.rst <docs/postgres.rst>`_.

Platform Specific Instructions
==============================

Debian
------

Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)

Fedora
------

Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse

ArchLinux
---------

During setup of Synapse you need to call python2.7 directly again::

FreeBSD
-------

Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
- Packages: ``pkg install py27-matrix-synapse``

NixOS
-----

Robin Lambertz has packaged Synapse for NixOS at:
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix

Windows Install
---------------
Synapse can be installed on Cygwin. It requires the following Cygwin packages:

You have two choices here, which will influence the form of your Matrix user
IDs:

1) Use the machine's own hostname as available on public DNS in the form of
   its A records. This is easier to set up initially, perhaps for
   testing, but lacks the flexibility of SRV.

2) Set up a SRV record for your domain name. This requires you create a SRV

Logging In To An Existing Account

Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
the form and click the Login button.

Identity Servers
================

we are running a single identity server (https://matrix.org) at the current
time.


URL Previews
============

Synapse 0.15.0 introduces an experimental new API for previewing URLs at
/_matrix/media/r0/preview_url. This is disabled by default. To turn it on
you must enable the `url_preview_enabled: True` config parameter and explicitly
specify the IP ranges that Synapse is not allowed to spider for previewing in
the `url_preview_ip_range_blacklist` configuration parameter. This is critical
from a security perspective to stop arbitrary Matrix users spidering 'internal'
URLs on your network. At the very least we recommend that your loopback and
RFC1918 IP addresses are blacklisted.

This also requires the optional lxml and netaddr python dependencies to be
installed.
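
For example, a minimal sketch of the relevant ``homeserver.yaml`` section
(the option names come from the text above; the blacklist values here are
illustrative, not exhaustive)::

    url_preview_enabled: True
    url_preview_ip_range_blacklist:
      - '127.0.0.0/8'
      - '10.0.0.0/8'
      - '172.16.0.0/12'
      - '192.168.0.0/16'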
Password reset
==============

If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Vector.

A manual password reset can be done via direct database access as follows.

First calculate the hash of the new password::

    $ source ~/.synapse/bin/activate
    $ ./scripts/hash_password
    Password:
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the `users` table in the database::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';

Where's the spec?!
==================

sphinxcontrib-napoleon::

Building internal API documentation::

    python setup.py build_sphinx


Help!! Synapse eats all my RAM!
===============================

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out
at around 3-4GB of resident memory - this is what we currently run
matrix.org on. The default setting is currently 0.1, which is probably
around a ~700MB footprint. You can dial it down further to 0.02 if
desired, which targets roughly ~512MB. Conversely you can dial it up if
you need performance for lots of users and have a box with a lot of RAM.
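
For example, to run a small-footprint synapse under synctl (a sketch, assuming
the ``~/.synapse`` virtualenv layout described above)::

    source ~/.synapse/bin/activate
    export SYNAPSE_CACHE_FACTOR=0.02
    synctl restart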
UPGRADE.rst

running:

    # Pull the latest version of the master branch.
    git pull
    # Update the versions of synapse's python dependencies.
    python synapse/python_dependencies.py | xargs -n1 pip install --upgrade


Upgrading to v0.15.0
====================

If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.


Upgrading to v0.11.0
Description=Synapse Matrix homeserver
Type=simple
User=synapse
Group=synapse
EnvironmentFile=-/etc/sysconfig/synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
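Assuming the unit above is installed as ``/etc/systemd/system/synapse.service``
(the path is an assumption, not part of the unit itself), it can be enabled and
started in the usual way::

    sudo systemctl enable synapse
    sudo systemctl start synapse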
docs/admin_api/README.rst (new file)

Admin APIs
==========

This directory includes documentation for the various synapse specific admin
APIs available.

Only users that are server admins can use these APIs. A user can be marked as a
server admin by updating the database directly, e.g.:

``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
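
For example, from the command line (the database name and file below are
assumptions about your deployment, not fixed values)::

    # postgres-backed synapse
    psql synapse -c "UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'"

    # sqlite-backed synapse
    sqlite3 homeserver.db "UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'"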

Restarting may be required for the changes to register.
docs/admin_api/purge_history_api.rst (new file)

Purge History API
=================

The purge history API allows server admins to purge historic events from their
database, reclaiming disk space.

Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.

The API is simply:

``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``

including an ``access_token`` of a server admin.
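For illustration, a hypothetical invocation with curl (the server name, room
ID, event ID and token are all placeholders; note that the ``!`` and ``$``
sigils in the IDs must be URL-encoded as ``%21`` and ``%24``)::

    curl -X POST \
        'https://my.server.here:8448/_matrix/client/r0/admin/purge_history/%21roomid%3Amy.server.here/%24eventid%3Amy.server.here?access_token=ADMIN_TOKEN'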
docs/admin_api/purge_remote_media.rst (new file)

Purge Remote Media API
======================

The purge remote media API allows server admins to purge old cached remote
media.

The API is::

    POST /_matrix/client/r0/admin/purge_media_cache

    {
        "before_ts": <unix_timestamp_in_ms>
    }

Which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.

If the user re-requests purged remote media, synapse will re-request the media
from the originating server.
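For illustration, a sketch with curl that purges remote media not accessed in
the last 30 days (the server name and token are placeholders)::

    # unix timestamp in ms, 30 days ago
    BEFORE_TS=$((($(date +%s) - 30 * 24 * 60 * 60) * 1000))
    curl -X POST \
        -H 'Content-Type: application/json' \
        -d "{\"before_ts\": $BEFORE_TS}" \
        "https://my.server.here:8448/_matrix/client/r0/admin/purge_media_cache?access_token=ADMIN_TOKEN"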
The format of the AS configuration file is as follows:

See the spec_ for further details on how application services work.

.. _spec: https://matrix.org/docs/spec/application_service/unstable.html
Basically, PEP8

  together, or want to deliberately extend or preserve vertical/horizontal
  space)

Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
This is so that we can generate documentation with
`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
in the sphinx documentation.
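
For example, a sketch of a google-style docstring (the function here is
illustrative only, not synapse's actual implementation)::

    def get_domain_from_id(user_id):
        """Return the domain portion of a Matrix ID.

        Args:
            user_id (str): A Matrix user ID, e.g. "@alice:example.com".

        Returns:
            str: The server name, e.g. "example.com".

        Raises:
            ValueError: If the ID contains no domain part.
        """
        if ":" not in user_id:
            raise ValueError("Invalid Matrix ID: %r" % (user_id,))
        return user_id.split(":", 1)[1]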

Code should pass pep8 --max-line-length=100 without any warnings.
docs/log_contexts.rst (new file)

What do I do about "Unexpected logging context" debug log-lines everywhere?

<Mjark> The logging context lives in thread local storage
<Mjark> Sometimes it gets out of sync with what it should actually be, usually because something scheduled something to run on the reactor without preserving the logging context.
<Matthew> what is the impact of it getting out of sync? and how and when should we preserve log context?
<Mjark> The impact is that some of the CPU and database metrics will be under-reported, and some log lines will be mis-attributed.
<Mjark> It should happen auto-magically in all the APIs that do IO or otherwise defer to the reactor.
<Erik> Mjark: the other place is if we branch, e.g. using defer.gatherResults

Unanswered: how and when should we preserve log context?
How to monitor Synapse metrics using Prometheus

   Restart synapse

3: Add a prometheus target for synapse. It needs to set the ``metrics_path``
   to a non-default value::

    - job_name: "synapse"
      metrics_path: "/_synapse/metrics"
      static_configs:
        - targets: ["my.server.here:9092"]

Standard Metric Names
---------------------

As of synapse version 0.18.2, the format of the process-wide metrics has been
changed to fit prometheus standard naming conventions. Additionally the units
have been changed to seconds, from milliseconds.

================================== =============================
New name                           Old name
---------------------------------- -----------------------------
process_cpu_user_seconds_total     process_resource_utime / 1000
process_cpu_system_seconds_total   process_resource_stime / 1000
process_open_fds (no 'type' label) process_fds
================================== =============================

The python-specific counts of garbage collector performance have been renamed.

=========================== ======================
New name                    Old name
--------------------------- ----------------------
python_gc_time              reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts            reactor_gc_counts
=========================== ======================

The twisted-specific reactor metrics have been renamed.

==================================== =====================
New name                             Old name
------------------------------------ ---------------------
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time     reactor_tick_time
==================================== =====================
docs/replication.rst (new file)

Replication Architecture
========================

Motivation
----------

We'd like to be able to split some of the work that synapse does into multiple
python processes. In theory multiple synapse processes could share a single
postgresql database and we'd scale up by running more synapse processes.
However much of synapse assumes that only one process is interacting with the
database, both for assigning unique identifiers when inserting into tables,
notifying components about new updates, and for invalidating its caches.

So running multiple copies of the current code isn't an option. One way to
run multiple processes would be to have a single writer process and multiple
reader processes connected to the same database. In order to do this we'd need
a way for the reader process to invalidate its in-memory caches when an update
happens on the writer. One way to do this is for the writer to present an
append-only log of updates which the readers can consume to invalidate their
caches and to push updates to listening clients or pushers.

Synapse already stores much of its data as an append-only log so that it can
correctly respond to /sync requests so the amount of code changes needed to
expose the append-only log to the readers should be fairly minimal.

Architecture
------------

The Replication API
~~~~~~~~~~~~~~~~~~~

Synapse will optionally expose a long poll HTTP API for extracting updates. The
API will have a similar shape to /sync in that clients provide tokens
indicating where in the log they have reached and a timeout. The synapse server
then either responds with updates immediately if it already has updates or it
waits until the timeout for more updates. If the timeout expires and nothing
happened then the server returns an empty response.

However unlike the /sync API this replication API is returning synapse specific
data rather than trying to implement a matrix specification. The replication
results are returned as arrays of rows where the rows are mostly lifted
directly from the database. This avoids unnecessary JSON parsing on the server
and hopefully avoids an impedance mismatch between the data returned and the
required updates to the datastore.

This does not replicate all the database tables as many of the database tables
are indexes that can be recovered from the contents of other tables.

The format and parameters for the api are documented in
``synapse/replication/resource.py``.
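
As a rough illustration of that long-poll shape (a sketch, not the
authoritative parameter list - see the module above for that), a poll against
a replication listener on port 9092 might look like::

    curl 'http://127.0.0.1:9092/_synapse/replication?events=0&timeout=30000'

where ``events=0`` is a hypothetical stream position to resume from and
``timeout`` is in milliseconds.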

The Slaved DataStore
~~~~~~~~~~~~~~~~~~~~

There are read-only versions of the synapse storage layer in
``synapse/replication/slave/storage`` that use the response of the replication
API to invalidate their caches.
the Home Server to generate credentials that are valid for use on the TURN
server through the use of a secret shared between the Home Server and the
TURN server.

This document describes how to install coturn
(https://github.com/coturn/coturn) which also supports the TURN REST API,
and integrate it with synapse.

coturn Setup
============

You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.

1. Check out coturn::

    git clone https://github.com/coturn/coturn.git coturn
    cd coturn

2. Configure it::

    ./configure

   You may need to install ``libevent2``: if so, you should do so
   in the way recommended by your operating system.
   You can ignore warnings about lack of database support: a
   database is unnecessary for this purpose.

3. Build and install it::

    make
    make install

4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
   lines, with example values, are::

    lt-cred-mech

    static-auth-secret=[your secret key here]
    realm=turn.myserver.org

   See turnserver.conf for explanations of the options.
   One way to generate the static-auth-secret is with pwgen::

    pwgen -s 64 1

   import your private key and certificate.

7. Start the turn server::

    bin/turnserver -o
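To point synapse at the new TURN server, the homeserver configuration gains
TURN settings along these lines (a sketch using synapse's ``turn_*`` options;
the hostname and secret are examples matching the coturn config above)::

    turn_uris:
      - "turn:turn.myserver.org:3478?transport=udp"
      - "turn:turn.myserver.org:3478?transport=tcp"
    turn_shared_secret: "[your secret key here]"
    turn_user_lifetime: 86400000   # one day, in milliseconds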
docs/url_previews.rst (new file)

URL Previews
============

Design notes on a URL previewing service for Matrix:

Options are:

1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.
   * Pros:
     * Decouples the implementation entirely from Synapse.
     * Uses existing Matrix events & content repo to store the metadata.
   * Cons:
     * Which AS should provide this service for a room, and why should you trust it?
     * Doesn't work well with E2E; you'd have to cut the AS into every room
     * the AS would end up subscribing to every room anyway.

2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:
   * Pros:
     * Simple and flexible; can be used by any clients at any point
   * Cons:
     * If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
     * We need somewhere to store the URL metadata rather than just using Matrix itself
     * We can't piggyback on matrix to distribute the metadata between HSes.

3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.
   * Pros:
     * Works transparently for all clients
     * Piggy-backs nicely on using Matrix for distributing the metadata.
     * No confusion as to which AS
   * Cons:
     * Doesn't work with E2E
     * We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.

4. Make the sending client use the preview API and insert the event itself when successful.
   * Pros:
     * Works well with E2E
     * No custom server functionality
     * Lets the client customise the preview that they send (like on FB)
   * Cons:
     * Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.

5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.

Best solution is probably a combination of both 2 and 4.

* Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending)
* Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one)

This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"

This is tantamount also to senders calculating their own thumbnails for sending in advance of the main content - we are trusting the sender not to lie about the content in the thumbnail. Whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.

However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.

As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.

API
---

::

    GET /_matrix/media/r0/preview_url?url=http://wherever.com
    200 OK
    {
        "og:type" : "article",
        "og:url" : "https://twitter.com/matrixdotorg/status/684074366691356672",
        "og:title" : "Matrix on Twitter",
        "og:image" : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png",
        "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”",
        "og:site_name" : "Twitter"
    }

* Downloads the URL
* If HTML, just stores it in RAM and parses it for OG meta tags
* Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
* If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.
* Otherwise, don't bother downloading further.
docs/workers.rst (new file)

Scaling synapse via workers
---------------------------

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

All processes continue to share the same database instance, and as such, workers
only work with postgres based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using
postgres anyway if you care about scalability).

The workers communicate with the master synapse process via a synapse-specific
HTTP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

To enable workers, you need to add a replication listener to the master synapse, e.g.::

    listeners:
      - port: 9092
        bind_address: '127.0.0.1'
        type: http
        tls: false
        x_forwarded: false
        resources:
          - names: [replication]
            compress: false

Under **no circumstances** should this replication API listener be exposed to the
public internet; it currently implements no authentication whatsoever and is
unencrypted HTTP.

You then create a set of configs for the various worker processes. These
worker configuration files should be stored in a dedicated subdirectory, to allow
synctl to manipulate them.

The currently available worker applications are:

* synapse.app.pusher - handles sending push notifications to sygnal and email
* synapse.app.synchrotron - handles /sync endpoints. Can scale horizontally through multiple instances.
* synapse.app.appservice - handles output traffic to Application Services
* synapse.app.federation_reader - handles receiving federation traffic (including the public_rooms API)
* synapse.app.media_repository - handles the media repository.
* synapse.app.client_reader - handles client API endpoints like /publicRooms

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.

You must specify the type of worker application (worker_app) and the replication
endpoint that it's talking to on the main synapse process (worker_replication_url).

For instance::

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
    worker_replication_url: http://127.0.0.1:9092/_synapse/replication

    worker_listeners:
     - type: http
       port: 8083
       resources:
         - names:
           - client

    worker_daemonize: True
    worker_pid_file: /home/matrix/synapse/synchrotron.pid
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
by the main synapse.

Obviously you should configure your loadbalancer to route the /sync endpoint to
the synchrotron instance(s) in this instance.
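
For example, with nginx in front, a sketch of such routing (hostnames, ports
and the exact location blocks are illustrative)::

    location /_matrix/client/r0/sync {
        proxy_pass http://127.0.0.1:8083;   # the synchrotron worker
    }

    location /_matrix {
        proxy_pass http://127.0.0.1:8008;   # the main synapse
    }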
Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.::

    synctl -a $CONFIG/workers start

Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.

To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

All of the above is highly experimental and subject to change as Synapse evolves,
but is documented here to help folks needing highly scalable Synapses similar
to the one running matrix.org!
jenkins-dendron-postgres.sh (new executable file)

#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --pusher \
    --synchrotron \
    --federation-reader \
    --client-reader \
    --appservice \
jenkins-flake8.sh (new executable file)

#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
export PEP8SUFFIX="--output-file=violations.flake8.log"

rm .coverage* || echo "No coverage files to remove"

tox -e packaging -e pep8
jenkins-postgres.sh (new executable file)

#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --synapse-directory $WORKSPACE \
jenkins-sqlite.sh (new executable file)

#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/install_and_run.sh \
    --synapse-directory $WORKSPACE \
jenkins-unittests.sh (new executable file)

#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox --notest -e py27
TOX_BIN=$WORKSPACE/.tox/py27/bin
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
$TOX_BIN/pip install lxml

tox -e py27
jenkins.sh (removed)

#!/bin/bash -eu

export PYTHONDONTWRITEBYTECODE=yep

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox

: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}

TOX_BIN=$WORKSPACE/.tox/py27/bin

if [[ ! -e .sytest-base ]]; then
  git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
  (cd .sytest-base; git fetch -p)
fi

rm -rf sytest
git clone .sytest-base sytest --shared
cd sytest

git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)

: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5}
: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5}
: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5}
export PERL5LIB PERL_MB_OPT PERL_MM_OPT

./install-deps.pl

: ${PORT_BASE:=8000}

echo >&2 "Running sytest with SQLite3";
./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
    --python $TOX_BIN/python --all --port-base $PORT_BASE > results-sqlite3.tap

RUN_POSTGRES=""

for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do
    if psql synapse_jenkins_$port <<< ""; then
        RUN_POSTGRES="$RUN_POSTGRES:$port"
        cat > localhost-$port/database.yaml << EOF
name: psycopg2
args:
    database: synapse_jenkins_$port
EOF
    fi
done

# Run if both postgresql databases exist
if test "$RUN_POSTGRES" = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
    echo >&2 "Running sytest with PostgreSQL";
    $TOX_BIN/pip install psycopg2
    ./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
        --python $TOX_BIN/python --all --port-base $PORT_BASE > results-postgresql.tap
else
    echo >&2 "Skipping running sytest with PostgreSQL, $RUN_POSTGRES"
fi

cd ..
cp sytest/.coverage.* .

# Combine the coverage reports
echo "Combining:" .coverage.*
$TOX_BIN/python -m coverage combine
# Output coverage to coverage.xml
$TOX_BIN/coverage xml -o coverage.xml
44
jenkins/clone.sh
Executable file
@@ -0,0 +1,44 @@
#! /bin/bash

# This clones a project from github into a named subdirectory.
# If the project has a branch with the same name as this branch
# then it will check out that branch after cloning.
# Otherwise it will check out "origin/develop".
# The first argument is the name of the directory to check out
# the branch into.
# The second argument is the URL of the remote repository to check out.
# Usually something like https://github.com/matrix-org/sytest.git

set -eux

NAME=$1
PROJECT=$2
BASE=".$NAME-base"

# Update our mirror.
if [ ! -d ".$NAME-base" ]; then
    # Create a local mirror of the source repository.
    # This saves us from having to download the entire repository
    # when this script is next run.
    git clone "$PROJECT" "$BASE" --mirror
else
    # Fetch any updates from the source repository.
    (cd "$BASE"; git fetch -p)
fi

# Remove the existing repository so that we have a clean copy
rm -rf "$NAME"
# Cloning with --shared means that we will share portions of the
# .git directory with our local mirror.
git clone "$BASE" "$NAME" --shared

# Jenkins may have supplied us with the name of the branch in the
# environment. Otherwise we will have to guess based on the current
# commit.
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
cd "$NAME"
# check out the relevant branch
git checkout "${GIT_BRANCH}" || (
    echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
    git checkout "origin/develop"
)
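For readers following the mirror-and-share pattern in clone.sh outside of bash, a rough Python equivalent is sketched below. The directory name and URL in the example call are illustrations only, and this sketch assumes git is on the PATH:

import os
import shutil
import subprocess

def clone(name, project):
    base = ".%s-base" % name
    if not os.path.isdir(base):
        # First run: take a bare mirror so later runs only need to fetch.
        subprocess.check_call(["git", "clone", project, base, "--mirror"])
    else:
        # Later runs: refresh the mirror, pruning refs deleted upstream.
        subprocess.check_call(["git", "fetch", "-p"], cwd=base)
    # Throw away any stale working copy, then re-clone cheaply: --shared
    # reuses the mirror's object store instead of copying it.
    if os.path.exists(name):
        shutil.rmtree(name)
    subprocess.check_call(["git", "clone", base, name, "--shared"])

clone("sytest", "https://github.com/matrix-org/sytest.git")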
20
jenkins/prepare_synapse.sh
Executable file
@@ -0,0 +1,20 @@
#! /bin/bash

cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox

mkdir -p $TOX_DIR

if ! [ $TOX_DIR -ef .tox ]; then
    ln -s "$TOX_DIR" .tox
fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin
$TOX_BIN/pip install setuptools
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
$TOX_BIN/pip install lxml
$TOX_BIN/pip install psycopg2
7
res/templates/mail-Vector.css
Normal file
@@ -0,0 +1,7 @@
.header {
    border-bottom: 4px solid #e4f7ed ! important;
}

.notif_link a, .footer a {
    color: #76CFA6 ! important;
}
156
res/templates/mail.css
Normal file
@@ -0,0 +1,156 @@
body {
    margin: 0px;
}

pre, code {
    word-break: break-word;
    white-space: pre-wrap;
}

#page {
    font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
    color: #454545;
    font-size: 12pt;
    width: 100%;
    padding: 20px;
}

#inner {
    width: 640px;
}

.header {
    width: 100%;
    height: 87px;
    color: #454545;
    border-bottom: 4px solid #e5e5e5;
}

.logo {
    text-align: right;
    margin-left: 20px;
}

.salutation {
    padding-top: 10px;
    font-weight: bold;
}

.summarytext {
}

.room {
    width: 100%;
    color: #454545;
    border-bottom: 1px solid #e5e5e5;
}

.room_header td {
    padding-top: 38px;
    padding-bottom: 10px;
    border-bottom: 1px solid #e5e5e5;
}

.room_name {
    vertical-align: middle;
    font-size: 18px;
    font-weight: bold;
}

.room_header h2 {
    margin-top: 0px;
    margin-left: 75px;
    font-size: 20px;
}

.room_avatar {
    width: 56px;
    line-height: 0px;
    text-align: center;
    vertical-align: middle;
}

.room_avatar img {
    width: 48px;
    height: 48px;
    object-fit: cover;
    border-radius: 24px;
}

.notif {
    border-bottom: 1px solid #e5e5e5;
    margin-top: 16px;
    padding-bottom: 16px;
}

.historical_message .sender_avatar {
    opacity: 0.3;
}

/* spell out opacity and historical_message class names for Outlook aka Word */
.historical_message .sender_name {
    color: #e3e3e3;
}

.historical_message .message_time {
    color: #e3e3e3;
}

.historical_message .message_body {
    color: #c7c7c7;
}

.historical_message td,
.message td {
    padding-top: 10px;
}

.sender_avatar {
    width: 56px;
    text-align: center;
    vertical-align: top;
}

.sender_avatar img {
    margin-top: -2px;
    width: 32px;
    height: 32px;
    border-radius: 16px;
}

.sender_name {
    display: inline;
    font-size: 13px;
    color: #a2a2a2;
}

.message_time {
    text-align: right;
    width: 100px;
    font-size: 11px;
    color: #a2a2a2;
}

.message_body {
}

.notif_link td {
    padding-top: 10px;
    padding-bottom: 10px;
    font-weight: bold;
}

.notif_link a, .footer a {
    color: #454545;
    text-decoration: none;
}

.debug {
    font-size: 10px;
    color: #888;
}

.footer {
    margin-top: 20px;
    text-align: center;
}
45
res/templates/notif.html
Normal file
@@ -0,0 +1,45 @@
{% for message in notif.messages %}
    <tr class="{{ "historical_message" if message.is_historical else "message" }}">
        <td class="sender_avatar">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                {% if message.sender_avatar_url %}
                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
                {% else %}
                    {% if message.sender_hash % 3 == 0 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
                    {% elif message.sender_hash % 3 == 1 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
                    {% else %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
                    {% endif %}
                {% endif %}
            {% endif %}
        </td>
        <td class="message_contents">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
            {% endif %}
            <div class="message_body">
                {% if message.msgtype == "m.text" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.emote" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.notice" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.image" %}
                    <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
                {% elif message.msgtype == "m.file" %}
                    <span class="filename">{{ message.body_text_plain }}</span>
                {% endif %}
            </div>
        </td>
        <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
    </tr>
{% endfor %}
<tr class="notif_link">
    <td></td>
    <td>
        <a href="{{ notif.link }}">View {{ room.title }}</a>
    </td>
    <td></td>
</tr>
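The mxc_to_http filter used in the template above is not defined in these files. The sketch below shows one plausible implementation based on the standard Matrix media thumbnail endpoint; the homeserver base URL is a placeholder and the real filter in synapse may differ in detail:

def mxc_to_http(mxc_url, width, height, method="crop",
                base_url="https://matrix.example.com"):
    # mxc URLs look like mxc://<server_name>/<media_id>; anything else is
    # passed through untouched.
    if not mxc_url.startswith("mxc://"):
        return mxc_url
    server_name, media_id = mxc_url[len("mxc://"):].split("/", 1)
    return "%s/_matrix/media/v1/thumbnail/%s/%s?width=%d&height=%d&method=%s" % (
        base_url, server_name, media_id, width, height, method,
    )

print mxc_to_http("mxc://matrix.org/SomeMediaId", 32, 32)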
16
res/templates/notif.txt
Normal file
@@ -0,0 +1,16 @@
{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}

View {{ room.title }} at {{ notif.link }}
55
res/templates/notif_mail.html
Normal file
@@ -0,0 +1,55 @@
<!doctype html>
<html lang="en">
    <head>
        <style type="text/css">
            {% include 'mail.css' without context %}
            {% include "mail-%s.css" % app_name ignore missing without context %}
        </style>
    </head>
    <body>
        <table id="page">
            <tr>
                <td> </td>
                <td id="inner">
                    <table class="header">
                        <tr>
                            <td>
                                <div class="salutation">Hi {{ user_display_name }},</div>
                                <div class="summarytext">{{ summary_text }}</div>
                            </td>
                            <td class="logo">
                                {% if app_name == "Riot" %}
                                    <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
                                {% elif app_name == "Vector" %}
                                    <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
                                {% else %}
                                    <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
                                {% endif %}
                            </td>
                        </tr>
                    </table>
                    {% for room in rooms %}
                        {% include 'room.html' with context %}
                    {% endfor %}
                    <div class="footer">
                        <a href="{{ unsubscribe_link }}">Unsubscribe</a>
                        <br/>
                        <br/>
                        <div class="debug">
                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
                            an event was received at {{ reason.received_at|format_ts("%c") }}
                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
                            {% if reason.last_sent_ts %}
                                and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
                                which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
                            {% else %}
                                and we don't have a last time we sent a mail for this room.
                            {% endif %}
                        </div>
                    </div>
                </td>
                <td> </td>
            </tr>
        </table>
    </body>
</html>
10
res/templates/notif_mail.txt
Normal file
@@ -0,0 +1,10 @@
Hi {{ user_display_name }},

{{ summary_text }}

{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}

You can disable these notifications at {{ unsubscribe_link }}
33
res/templates/room.html
Normal file
@@ -0,0 +1,33 @@
<table class="room">
    <tr class="room_header">
        <td class="room_avatar">
            {% if room.avatar_url %}
                <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
            {% else %}
                {% if room.hash % 3 == 0 %}
                    <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
                {% elif room.hash % 3 == 1 %}
                    <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
                {% else %}
                    <img alt="" src="https://vector.im/beta/img/f4c371.png" />
                {% endif %}
            {% endif %}
        </td>
        <td class="room_name" colspan="2">
            {{ room.title }}
        </td>
    </tr>
    {% if room.invite %}
        <tr>
            <td></td>
            <td>
                <a href="{{ room.link }}">Join the conversation.</a>
            </td>
            <td></td>
        </tr>
    {% else %}
        {% for notif in room.notifs %}
            {% include 'notif.html' with context %}
        {% endfor %}
    {% endif %}
</table>
9
res/templates/room.txt
Normal file
@@ -0,0 +1,9 @@
{{ room.title }}

{% if room.invite %}
You've been invited, join at {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}
@@ -86,9 +86,12 @@ def used_names(prefix, item, defs, names):
    for name, funcs in defs.get('class', {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    path = prefix.rstrip('.')
    for used in defs.get('uses', ()):
        if used in names:
            names[used].setdefault('used', {}).setdefault(item, []).append(prefix.rstrip('.'))
            if item:
                names[item].setdefault('uses', []).append(used)
            names[used].setdefault('used', {}).setdefault(item, []).append(path)


if __name__ == '__main__':
@@ -113,6 +116,10 @@ if __name__ == '__main__':
        "--referrers", default=0, type=int,
        help="Include referrers up to the given depth"
    )
    parser.add_argument(
        "--referred", default=0, type=int,
        help="Include referred down to the given depth"
    )
    parser.add_argument(
        "--format", default="yaml",
        help="Output format, one of 'yaml' or 'dot'"
@@ -161,6 +168,20 @@ if __name__ == '__main__':
                continue
            result[name] = definition

    referred_depth = args.referred
    referred = set()
    while referred_depth:
        referred_depth -= 1
        for entry in result.values():
            for uses in entry.get("uses", ()):
                referred.add(uses)
        for name, definition in names.items():
            if not name in referred:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    if args.format == 'yaml':
        yaml.dump(result, sys.stdout, default_flow_style=False)
    elif args.format == 'dot':
@@ -116,17 +116,19 @@ def get_json(origin_name, origin_key, destination, path):
    authorization_headers = []

    for key, sig in signed_json["signatures"][origin_name].items():
        authorization_headers.append(bytes(
            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
                origin_name, key, sig,
            )
        ))
        header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
            origin_name, key, sig,
        )
        authorization_headers.append(bytes(header))
        sys.stderr.write(header)
        sys.stderr.write("\n")

    result = requests.get(
        lookup(destination, path),
        headers={"Authorization": authorization_headers[0]},
        verify=False,
    )
    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
    return result.json()
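Pulled out of the hunk above, the header construction reduces to a small helper: every signature the origin has made over the request JSON becomes one X-Matrix Authorization header. This sketch assumes signed_json already carries a signatures block for origin_name:

def make_auth_headers(origin_name, signed_json):
    # One header per signing key the origin used.
    headers = []
    for key, sig in signed_json["signatures"][origin_name].items():
        headers.append(
            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig)
        )
    return headers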
@@ -141,6 +143,7 @@ def main():
    )

    json.dump(result, sys.stdout)
    print ""


if __name__ == "__main__":
    main()
67
scripts-dev/tail-synapse.py
Normal file
@@ -0,0 +1,67 @@
import requests
import collections
import sys
import time
import json

Entry = collections.namedtuple("Entry", "name position rows")

ROW_TYPES = {}


def row_type_for_columns(name, column_names):
    column_names = tuple(column_names)
    row_type = ROW_TYPES.get((name, column_names))
    if row_type is None:
        row_type = collections.namedtuple(name, column_names)
        ROW_TYPES[(name, column_names)] = row_type
    return row_type


def parse_response(content):
    streams = json.loads(content)
    result = {}
    for name, value in streams.items():
        row_type = row_type_for_columns(name, value["field_names"])
        position = value["position"]
        rows = [row_type(*row) for row in value["rows"]]
        result[name] = Entry(name, position, rows)
    return result


def replicate(server, streams):
    return parse_response(requests.get(
        server + "/_synapse/replication",
        verify=False,
        params=streams
    ).content)


def main():
    server = sys.argv[1]

    streams = None
    while not streams:
        try:
            streams = {
                row.name: row.position
                for row in replicate(server, {"streams":"-1"})["streams"].rows
            }
        except requests.exceptions.ConnectionError as e:
            time.sleep(0.1)

    while True:
        try:
            results = replicate(server, streams)
        except:
            sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
            break
        for update in results.values():
            for row in update.rows:
                sys.stdout.write(repr(row) + "\n")
            streams[update.name] = update.position


if __name__=='__main__':
    main()
@@ -1 +0,0 @@
perl -MCrypt::Random -MCrypt::Eksblowfish::Bcrypt -e 'print Crypt::Eksblowfish::Bcrypt::bcrypt("secret", "\$2\$12\$" . Crypt::Eksblowfish::Bcrypt::en_base64(Crypt::Random::makerandom_octet(Length=>16)))."\n"'
55
scripts/hash_password
Executable file
@@ -0,0 +1,55 @@
#!/usr/bin/env python

import argparse

import sys

import bcrypt
import getpass

import yaml

bcrypt_rounds=12
password_pepper = ""

def prompt_for_pass():
    password = getpass.getpass("Password: ")

    if not password:
        raise Exception("Password cannot be blank.")

    confirm_password = getpass.getpass("Confirm password: ")

    if password != confirm_password:
        raise Exception("Passwords do not match.")

    return password

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Calculate the hash of a new password, so that passwords"
                    " can be reset")
    parser.add_argument(
        "-p", "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-c", "--config",
        type=argparse.FileType('r'),
        help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
    )

    args = parser.parse_args()
    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
        password_config = config.get("password_config", {})
        password_pepper = password_config.get("pepper", password_pepper)
    password = args.password

    if not password:
        password = prompt_for_pass()

    print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
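A hash produced by this script can be checked later with the same py2-era bcrypt API: feeding the stored hash back in as the salt reproduces the hash only when the password (plus pepper) matches. This sketch assumes the same pepper that was configured when hashing:

import bcrypt

def check_password(candidate, stored_hash, pepper=""):
    # bcrypt encodes the salt and cost in the hash itself, so hashing the
    # candidate with the stored hash as salt must reproduce it exactly.
    return bcrypt.hashpw(candidate + pepper, stored_hash) == stored_hash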
@@ -25,18 +25,26 @@ import urllib2
import yaml


def request_registration(user, password, server_location, shared_secret):
def request_registration(user, password, server_location, shared_secret, admin=False):
    mac = hmac.new(
        key=shared_secret,
        msg=user,
        digestmod=hashlib.sha1,
    ).hexdigest()
    )

    mac.update(user)
    mac.update("\x00")
    mac.update(password)
    mac.update("\x00")
    mac.update("admin" if admin else "notadmin")

    mac = mac.hexdigest()

    data = {
        "user": user,
        "password": password,
        "mac": mac,
        "type": "org.matrix.login.shared_secret",
        "admin": admin,
    }

    server_location = server_location.rstrip("/")
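The new MAC scheme from the hunk above, collected into a standalone sketch; the secret and credentials here are example values only:

import hashlib
import hmac

def registration_mac(shared_secret, user, password, admin=False):
    # NUL-separated fields, keyed with the server's registration shared secret.
    mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
    mac.update(user)
    mac.update("\x00")
    mac.update(password)
    mac.update("\x00")
    mac.update("admin" if admin else "notadmin")
    return mac.hexdigest()

print registration_mac("registration-secret", "alice", "wonderland", admin=True)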
@@ -68,7 +76,7 @@ def request_registration(user, password, server_location, shared_secret):
        sys.exit(1)


def register_new_user(user, password, server_location, shared_secret):
def register_new_user(user, password, server_location, shared_secret, admin):
    if not user:
        try:
            default_user = getpass.getuser()
@@ -99,7 +107,14 @@ def register_new_user(user, password, server_location, shared_secret):
            print "Passwords do not match"
            sys.exit(1)

    request_registration(user, password, server_location, shared_secret)
    if not admin:
        admin = raw_input("Make admin [no]: ")
        if admin in ("y", "yes", "true"):
            admin = True
        else:
            admin = False

    request_registration(user, password, server_location, shared_secret, bool(admin))


if __name__ == "__main__":
@@ -119,6 +134,11 @@ if __name__ == "__main__":
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-a", "--admin",
        action="store_true",
        help="Register new user as an admin. Will prompt if omitted.",
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
@@ -151,4 +171,4 @@ if __name__ == "__main__":
    else:
        secret = args.shared_secret

    register_new_user(args.user, args.password, args.server_url, secret)
    register_new_user(args.user, args.password, args.server_url, secret, args.admin)
@@ -19,6 +19,7 @@ from twisted.enterprise import adbapi

from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database

import argparse
import curses
@@ -33,10 +34,12 @@ logger = logging.getLogger("synapse_port_db")


BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier"],
    "events": ["processed", "outlier", "contains_url"],
    "rooms": ["is_public"],
    "event_edges": ["is_state"],
    "presence_list": ["accepted"],
    "presence_stream": ["currently_active"],
    "public_room_list_stream": ["visibility"],
}
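BOOLEAN_COLUMNS drives a row fix-up during the port: SQLite stores booleans as 0/1 integers, which PostgreSQL boolean columns reject. The real conversion happens inside the script's _convert_rows; the helper below is only an illustrative sketch of that kind of coercion:

def convert_booleans(table, headers, rows):
    # Columns listed for this table are coerced from 0/1 to real booleans;
    # NULLs are left alone.
    bool_cols = BOOLEAN_COLUMNS.get(table, [])
    bool_idx = set(i for i, h in enumerate(headers) if h in bool_cols)
    return [
        tuple(
            bool(value) if i in bool_idx and value is not None else value
            for i, value in enumerate(row)
        )
        for row in rows
    ]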
@@ -69,6 +72,14 @@ APPEND_ONLY_TABLES = [
    "event_to_state_groups",
    "rejections",
    "event_search",
    "presence_stream",
    "push_rules_stream",
    "current_state_resets",
    "ex_outlier_stream",
    "cache_invalidation_stream",
    "public_room_list_stream",
    "state_group_edges",
    "stream_ordering_to_exterm",
]
@@ -90,8 +101,12 @@ class Store(object):

    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
    _simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
    _simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
    _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__[
        "_simple_select_one_onecol_txn"
    ]

    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
@@ -156,31 +171,40 @@ class Porter(object):
    def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            next_chunk = yield self.postgres_store._simple_select_one_onecol(
            row = yield self.postgres_store._simple_select_one(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcol="rowid",
                retcols=("forward_rowid", "backward_rowid"),
                allow_none=True,
            )

            total_to_port = None
            if next_chunk is None:
            if row is None:
                if table == "sent_transactions":
                    next_chunk, already_ported, total_to_port = (
                    forward_chunk, already_ported, total_to_port = (
                        yield self._setup_sent_transactions()
                    )
                    backward_chunk = 0
                else:
                    yield self.postgres_store._simple_insert(
                        table="port_from_sqlite3",
                        values={"table_name": table, "rowid": 1}
                        values={
                            "table_name": table,
                            "forward_rowid": 1,
                            "backward_rowid": 0,
                        }
                    )

                    next_chunk = 1
                    forward_chunk = 1
                    backward_chunk = 0
                    already_ported = 0
            else:
                forward_chunk = row["forward_rowid"]
                backward_chunk = row["backward_rowid"]

            if total_to_port is None:
                already_ported, total_to_port = yield self._get_total_count_to_port(
                    table, next_chunk
                    table, forward_chunk, backward_chunk
                )
        else:
            def delete_all(txn):
@@ -194,32 +218,124 @@ class Porter(object):

            yield self.postgres_store._simple_insert(
                table="port_from_sqlite3",
                values={"table_name": table, "rowid": 0}
                values={
                    "table_name": table,
                    "forward_rowid": 1,
                    "backward_rowid": 0,
                }
            )

            next_chunk = 1
            forward_chunk = 1
            backward_chunk = 0

            already_ported, total_to_port = yield self._get_total_count_to_port(
                table, next_chunk
                table, forward_chunk, backward_chunk
            )

        defer.returnValue((table, already_ported, total_to_port, next_chunk))
        defer.returnValue(
            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
        )
    @defer.inlineCallbacks
    def handle_table(self, table, postgres_size, table_size, next_chunk):
    def handle_table(self, table, postgres_size, table_size, forward_chunk,
                     backward_chunk):
        if not table_size:
            return

        self.progress.add_table(table, postgres_size, table_size)

        select = (
        if table == "event_search":
            yield self.handle_search_table(
                postgres_size, table_size, forward_chunk, backward_chunk
            )
            return

        forward_select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        backward_select = (
            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        do_forward = [True]
        do_backward = [True]

        while True:
            def r(txn):
                txn.execute(select, (next_chunk, self.batch_size,))
                forward_rows = []
                backward_rows = []
                if do_forward[0]:
                    txn.execute(forward_select, (forward_chunk, self.batch_size,))
                    forward_rows = txn.fetchall()
                    if not forward_rows:
                        do_forward[0] = False

                if do_backward[0]:
                    txn.execute(backward_select, (backward_chunk, self.batch_size,))
                    backward_rows = txn.fetchall()
                    if not backward_rows:
                        do_backward[0] = False

                if forward_rows or backward_rows:
                    headers = [column[0] for column in txn.description]
                else:
                    headers = None

                return headers, forward_rows, backward_rows

            headers, frows, brows = yield self.sqlite_store.runInteraction(
                "select", r
            )

            if frows or brows:
                if frows:
                    forward_chunk = max(row[0] for row in frows) + 1
                if brows:
                    backward_chunk = min(row[0] for row in brows) - 1

                rows = frows + brows
                self._convert_rows(table, headers, rows)

                def insert(txn):
                    self.postgres_store.insert_many_txn(
                        txn, table, headers[1:], rows
                    )

                    self.postgres_store._simple_update_one_txn(
                        txn,
                        table="port_from_sqlite3",
                        keyvalues={"table_name": table},
                        updatevalues={
                            "forward_rowid": forward_chunk,
                            "backward_rowid": backward_chunk,
                        },
                    )

                yield self.postgres_store.execute(insert)

                postgres_size += len(rows)

                self.progress.update(table, postgres_size)
            else:
                return

    @defer.inlineCallbacks
    def handle_search_table(self, postgres_size, table_size, forward_chunk,
                            backward_chunk):
        select = (
            "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
            " FROM event_search as es"
            " INNER JOIN events AS e USING (event_id, room_id)"
            " WHERE es.rowid >= ?"
            " ORDER BY es.rowid LIMIT ?"
        )

        while True:
            def r(txn):
                txn.execute(select, (forward_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]
@@ -228,59 +344,51 @@ class Porter(object):
            headers, rows = yield self.sqlite_store.runInteraction("select", r)

            if rows:
                next_chunk = rows[-1][0] + 1
                forward_chunk = rows[-1][0] + 1

                if table == "event_search":
                    # We have to treat event_search differently since it has a
                    # different structure in the two different databases.
                    def insert(txn):
                        sql = (
                            "INSERT INTO event_search (event_id, room_id, key, sender, vector)"
                            " VALUES (?,?,?,?,to_tsvector('english', ?))"
                # We have to treat event_search differently since it has a
                # different structure in the two different databases.
                def insert(txn):
                    sql = (
                        "INSERT INTO event_search (event_id, room_id, key,"
                        " sender, vector, origin_server_ts, stream_ordering)"
                        " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
                    )

                    rows_dict = [
                        dict(zip(headers, row))
                        for row in rows
                    ]

                    txn.executemany(sql, [
                        (
                            row["event_id"],
                            row["room_id"],
                            row["key"],
                            row["sender"],
                            row["value"],
                            row["origin_server_ts"],
                            row["stream_ordering"],
                        )
                        for row in rows_dict
                    ])

                        rows_dict = [
                            dict(zip(headers, row))
                            for row in rows
                        ]

                        txn.executemany(sql, [
                            (
                                row["event_id"],
                                row["room_id"],
                                row["key"],
                                row["sender"],
                                row["value"],
                            )
                            for row in rows_dict
                        ])

                        self.postgres_store._simple_update_one_txn(
                            txn,
                            table="port_from_sqlite3",
                            keyvalues={"table_name": table},
                            updatevalues={"rowid": next_chunk},
                        )
                else:
                    self._convert_rows(table, headers, rows)

                    def insert(txn):
                        self.postgres_store.insert_many_txn(
                            txn, table, headers[1:], rows
                        )

                        self.postgres_store._simple_update_one_txn(
                            txn,
                            table="port_from_sqlite3",
                            keyvalues={"table_name": table},
                            updatevalues={"rowid": next_chunk},
                        )
                    self.postgres_store._simple_update_one_txn(
                        txn,
                        table="port_from_sqlite3",
                        keyvalues={"table_name": "event_search"},
                        updatevalues={
                            "forward_rowid": forward_chunk,
                            "backward_rowid": backward_chunk,
                        },
                    )

                yield self.postgres_store.execute(insert)

                postgres_size += len(rows)

                self.progress.update(table, postgres_size)
                self.progress.update("event_search", postgres_size)

            else:
                return
@@ -292,7 +400,7 @@ class Porter(object):
            }
        )

        database_engine.prepare_database(db_conn)
        prepare_database(db_conn, database_engine, config=None)

        db_conn.commit()

@@ -309,8 +417,8 @@ class Porter(object):
            **self.postgres_config["args"]
        )

        sqlite_engine = create_engine("sqlite3")
        postgres_engine = create_engine("psycopg2")
        sqlite_engine = create_engine(sqlite_config)
        postgres_engine = create_engine(postgres_config)

        self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
        self.postgres_store = Store(postgres_db_pool, postgres_engine)
@@ -354,10 +462,32 @@ class Porter(object):
            txn.execute(
                "CREATE TABLE port_from_sqlite3 ("
                " table_name varchar(100) NOT NULL UNIQUE,"
                " rowid bigint NOT NULL"
                " forward_rowid bigint NOT NULL,"
                " backward_rowid bigint NOT NULL"
                ")"
            )

        # The old port script created a table with just a "rowid" column.
        # We want people to be able to rerun this script from an old port
        # so that they can pick up any missing events that were not
        # ported across.
        def alter_table(txn):
            txn.execute(
                "ALTER TABLE IF EXISTS port_from_sqlite3"
                " RENAME rowid TO forward_rowid"
            )
            txn.execute(
                "ALTER TABLE IF EXISTS port_from_sqlite3"
                " ADD backward_rowid bigint NOT NULL DEFAULT 0"
            )

        try:
            yield self.postgres_store.runInteraction(
                "alter_table", alter_table
            )
        except Exception as e:
            logger.info("Failed to create port table: %s", e)

        try:
            yield self.postgres_store.runInteraction(
                "create_port_table", create_port_table
@@ -417,7 +547,7 @@ class Porter(object):
    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
        # Only save things from the last day
        yesterday = int(time.time()*1000) - 86400000
        yesterday = int(time.time() * 1000) - 86400000

        # And save the max transaction id from each destination
        select = (
@@ -473,7 +603,11 @@ class Porter(object):

        yield self.postgres_store._simple_insert(
            table="port_from_sqlite3",
            values={"table_name": "sent_transactions", "rowid": next_chunk}
            values={
                "table_name": "sent_transactions",
                "forward_rowid": next_chunk,
                "backward_rowid": 0,
            }
        )

        def get_sent_table_size(txn):
@@ -494,13 +628,18 @@ class Porter(object):
        defer.returnValue((next_chunk, inserted_rows, total_count))

    @defer.inlineCallbacks
    def _get_remaining_count_to_port(self, table, next_chunk):
        rows = yield self.sqlite_store.execute_sql(
    def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
        frows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
            next_chunk,
            forward_chunk,
        )

        defer.returnValue(rows[0][0])
        brows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
            backward_chunk,
        )

        defer.returnValue(frows[0][0] + brows[0][0])

    @defer.inlineCallbacks
    def _get_already_ported_count(self, table):
@@ -511,10 +650,10 @@ class Porter(object):
        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_total_count_to_port(self, table, next_chunk):
    def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
        remaining, done = yield defer.gatherResults(
            [
                self._get_remaining_count_to_port(table, next_chunk),
                self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
                self._get_already_ported_count(table),
            ],
            consumeErrors=True,
@@ -645,7 +784,7 @@ class CursesProgress(Progress):
            color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)

            self.stdscr.addstr(
                i+2, left_margin + max_len - len(table),
                i + 2, left_margin + max_len - len(table),
                table,
                curses.A_BOLD | color,
            )
@@ -653,18 +792,18 @@ class CursesProgress(Progress):
            size = 20

            progress = "[%s%s]" % (
                "#" * int(perc*size/100),
                " " * (size - int(perc*size/100)),
                "#" * int(perc * size / 100),
                " " * (size - int(perc * size / 100)),
            )

            self.stdscr.addstr(
                i+2, left_margin + max_len + middle_space,
                i + 2, left_margin + max_len + middle_space,
                "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
            )

        if self.finished:
            self.stdscr.addstr(
                rows-1, 0,
                rows - 1, 0,
                "Press any key to exit...",
            )
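The forward_rowid/backward_rowid pair threaded through the port script above lets a rerun resume from an old single-rowid checkpoint while still sweeping up rows below it: the forward pass takes rows at or above one pointer, the backward pass rows at or below the other. A toy model of just the chunking arithmetic, detached from the database and purely illustrative:

def next_chunks(rowids, forward_chunk, backward_chunk, batch_size):
    # Forward pass: rows at or above the forward pointer, lowest first.
    frows = sorted(r for r in rowids if r >= forward_chunk)[:batch_size]
    # Backward pass: rows at or below the backward pointer, highest first.
    brows = sorted((r for r in rowids if r <= backward_chunk), reverse=True)
    brows = brows[:batch_size]
    if frows:
        forward_chunk = max(frows) + 1
    if brows:
        backward_chunk = min(brows) - 1
    return frows, brows, forward_chunk, backward_chunk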
@@ -16,4 +16,5 @@ ignore =

[flake8]
max-line-length = 90
ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
ignore = W503
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.13.0"
__version__ = "0.18.4"
@@ -13,22 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module contains classes for authenticating the user."""
import logging

import pymacaroons
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
from synapse.types import Requester, RoomID, UserID, EventID
from synapse.util.logutils import log_function
from synapse.util.logcontext import preserve_context_over_fn
from unpaddedbase64 import decode_base64

import logging
import pymacaroons
import synapse.types
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
from synapse.types import UserID, get_domain_from_id
from synapse.util.logcontext import preserve_context_over_fn
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)
@@ -41,12 +41,20 @@ AuthEventTypes = (


class Auth(object):

    """
    FIXME: This class contains a mix of functions for authenticating users
    of our client-server API and authenticating events added to room graphs.
    """
    def __init__(self, hs):
        self.hs = hs
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
        # Docs for these currently live at
        # github.com/matrix-org/matrix-doc/blob/master/drafts/macaroons_caveats.rst
        # In addition, we have type == delete_pusher which grants access only to
        # delete pushers.
        self._KNOWN_CAVEAT_PREFIXES = set([
            "gen = ",
            "guest = ",
@@ -55,7 +63,18 @@ class Auth(object):
            "user_id = ",
        ])
    def check(self, event, auth_events):
    @defer.inlineCallbacks
    def check_from_context(self, event, context, do_sig_check=True):
        auth_events_ids = yield self.compute_auth_events(
            event, context.prev_state_ids, for_verification=True,
        )
        auth_events = yield self.store.get_events(auth_events_ids)
        auth_events = {
            (e.type, e.state_key): e for e in auth_events.values()
        }
        self.check(event, auth_events=auth_events, do_sig_check=do_sig_check)

    def check(self, event, auth_events, do_sig_check=True):
        """ Checks if this event is correctly authed.

        Args:
@@ -66,11 +85,35 @@ class Auth(object):
        Returns:
            True if the auth checks pass.
        """
        self.check_size_limits(event)
        with Measure(self.clock, "auth.check"):
            self.check_size_limits(event)

            try:
                if not hasattr(event, "room_id"):
                    raise AuthError(500, "Event has no room_id: %s" % event)

                if do_sig_check:
                    sender_domain = get_domain_from_id(event.sender)
                    event_id_domain = get_domain_from_id(event.event_id)

                    is_invite_via_3pid = (
                        event.type == EventTypes.Member
                        and event.membership == Membership.INVITE
                        and "third_party_invite" in event.content
                    )

                    # Check the sender's domain has signed the event
                    if not event.signatures.get(sender_domain):
                        # We allow invites via 3pid to have a sender from a different
                        # HS, as the sender must match the sender of the original
                        # 3pid invite. This is checked further down with the
                        # other dedicated membership checks.
                        if not is_invite_via_3pid:
                            raise AuthError(403, "Event not signed by sender's server")

                    # Check the event_id's domain has signed the event
                    if not event.signatures.get(event_id_domain):
                        raise AuthError(403, "Event not signed by sending server")

                if auth_events is None:
                    # Oh, we don't know what the state of the room was, so we
                    # are trusting that this is allowed (at least for now)
@@ -78,6 +121,12 @@ class Auth(object):
                    return True

                if event.type == EventTypes.Create:
                    room_id_domain = get_domain_from_id(event.room_id)
                    if room_id_domain != sender_domain:
                        raise AuthError(
                            403,
                            "Creation event's room_id domain does not match sender's"
                        )
                    # FIXME
                    return True
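All of the domain comparisons above lean on get_domain_from_id. For reference, a minimal sketch of what it does: Matrix IDs have the shape "@localpart:domain" (or "!roomid:domain", "$eventid:domain"), so the domain is everything after the first colon. The real helper in synapse.types also raises a proper error for malformed IDs:

def get_domain_from_id(string):
    return string.split(":", 1)[1]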
@@ -89,8 +138,8 @@ class Auth(object):
                        "Room %r does not exist" % (event.room_id,)
                    )

                creating_domain = RoomID.from_string(event.room_id).domain
                originating_domain = UserID.from_string(event.sender).domain
                creating_domain = get_domain_from_id(event.room_id)
                originating_domain = get_domain_from_id(event.sender)
                if creating_domain != originating_domain:
                    if not self.can_federate(event, auth_events):
                        raise AuthError(
@@ -100,6 +149,22 @@ class Auth(object):

                # FIXME: Temp hack
                if event.type == EventTypes.Aliases:
                    if not event.is_state():
                        raise AuthError(
                            403,
                            "Alias event must be a state event",
                        )
                    if not event.state_key:
                        raise AuthError(
                            403,
                            "Alias event must have non-empty state_key"
                        )
                    sender_domain = get_domain_from_id(event.sender)
                    if event.state_key != sender_domain:
                        raise AuthError(
                            403,
                            "Alias event's state_key does not match sender's domain"
                        )
                    return True

                logger.debug(
@@ -118,6 +183,24 @@ class Auth(object):
                    return allowed

                self.check_event_sender_in_room(event, auth_events)

                # Special case to allow m.room.third_party_invite events wherever
                # a user is allowed to issue invites. Fixes
                # https://github.com/vector-im/vector-web/issues/1208 hopefully
                if event.type == EventTypes.ThirdPartyInvite:
                    user_level = self._get_user_power_level(event.user_id, auth_events)
                    invite_level = self._get_named_level(auth_events, "invite", 0)

                    if user_level < invite_level:
                        raise AuthError(
                            403, (
                                "You cannot issue a third party invite for %s." %
                                (event.content.display_name,)
                            )
                        )
                    else:
                        return True

                self._can_send_event(event, auth_events)

                if event.type == EventTypes.PowerLevels:
@@ -127,13 +210,6 @@ class Auth(object):
                    self.check_redaction(event, auth_events)

                logger.debug("Allowing! %s", event)
        except AuthError as e:
            logger.info(
                "Event auth check failed on event %s with msg: %s",
                event, e.msg
            )
            logger.info("Denying! %s", event)
            raise

    def check_size_limits(self, event):
        def too_big(field):
@@ -219,21 +295,17 @@ class Auth(object):

    @defer.inlineCallbacks
    def check_host_in_room(self, room_id, host):
        curr_state = yield self.state.get_current_state(room_id)
        with Measure(self.clock, "check_host_in_room"):
            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)

        for event in curr_state.values():
            if event.type == EventTypes.Member:
                try:
                    if UserID.from_string(event.state_key).domain != host:
                        continue
                except:
                    logger.warn("state_key not user_id: %s", event.state_key)
                    continue
            entry = yield self.state.resolve_state_groups(
                room_id, latest_event_ids
            )

                if event.content["membership"] == Membership.JOIN:
                    defer.returnValue(True)

        defer.returnValue(False)
            ret = yield self.store.is_host_joined(
                room_id, host, entry.state_group, entry.state
            )
            defer.returnValue(ret)

    def check_event_sender_in_room(self, event, auth_events):
        key = (EventTypes.Member, event.user_id, )
@@ -271,8 +343,8 @@ class Auth(object):

        target_user_id = event.state_key

        creating_domain = RoomID.from_string(event.room_id).domain
        target_domain = UserID.from_string(target_user_id).domain
        creating_domain = get_domain_from_id(event.room_id)
        target_domain = get_domain_from_id(target_user_id)
        if creating_domain != target_domain:
            if not self.can_federate(event, auth_events):
                raise AuthError(
@@ -328,6 +400,10 @@ class Auth(object):
        if Membership.INVITE == membership and "third_party_invite" in event.content:
            if not self._verify_third_party_invite(event, auth_events):
                raise AuthError(403, "You are not invited to this room.")
            if target_banned:
                raise AuthError(
                    403, "%s is banned from the room" % (target_user_id,)
                )
            return True

        if Membership.JOIN != membership:
@@ -432,33 +508,51 @@ class Auth(object):
        if not invite_event:
            return False

        if invite_event.sender != event.sender:
            return False

        if event.user_id != invite_event.user_id:
            return False
        try:
            public_key = invite_event.content["public_key"]
            if signed["mxid"] != event.state_key:
                return False
            if signed["token"] != token:
                return False
            for server, signature_block in signed["signatures"].items():
                for key_name, encoded_signature in signature_block.items():
                    if not key_name.startswith("ed25519:"):
                        return False
                    verify_key = decode_verify_key_bytes(
                        key_name,
                        decode_base64(public_key)
                    )
                    verify_signed_json(signed, server, verify_key)

                    # We got the public key from the invite, so we know that the
                    # correct server signed the signed bundle.
                    # The caller is responsible for checking that the signing
                    # server has not revoked that public key.
                    return True
        if signed["mxid"] != event.state_key:
            return False
        except (KeyError, SignatureVerifyException,):
        if signed["token"] != token:
            return False

        for public_key_object in self.get_public_keys(invite_event):
            public_key = public_key_object["public_key"]
            try:
                for server, signature_block in signed["signatures"].items():
                    for key_name, encoded_signature in signature_block.items():
                        if not key_name.startswith("ed25519:"):
                            continue
                        verify_key = decode_verify_key_bytes(
                            key_name,
                            decode_base64(public_key)
                        )
                        verify_signed_json(signed, server, verify_key)

                        # We got the public key from the invite, so we know that the
                        # correct server signed the signed bundle.
                        # The caller is responsible for checking that the signing
                        # server has not revoked that public key.
                        return True
            except (KeyError, SignatureVerifyException,):
                continue
        return False

    def get_public_keys(self, invite_event):
        public_keys = []
        if "public_key" in invite_event.content:
            o = {
                "public_key": invite_event.content["public_key"],
            }
            if "key_validity_url" in invite_event.content:
                o["key_validity_url"] = invite_event.content["key_validity_url"]
            public_keys.append(o)
        public_keys.extend(invite_event.content.get("public_keys", []))
        return public_keys

    def _get_power_level_event(self, auth_events):
        key = (EventTypes.PowerLevels, "", )
        return auth_events.get(key)
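Condensed from the rewritten loop above: the signed bundle is accepted if any advertised public key verifies an ed25519 signature over it, and a KeyError or a failed verification simply moves on to the next key. A sketch using the same signedjson and unpaddedbase64 imports as auth.py:

from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException
from unpaddedbase64 import decode_base64

def signed_by_any_key(signed, public_keys):
    for public_key_object in public_keys:
        public_key = public_key_object["public_key"]
        try:
            for server, signature_block in signed["signatures"].items():
                for key_name in signature_block:
                    if not key_name.startswith("ed25519:"):
                        continue
                    verify_key = decode_verify_key_bytes(
                        key_name, decode_base64(public_key)
                    )
                    # Raises SignatureVerifyException on a bad signature.
                    verify_signed_json(signed, server, verify_key)
                    return True
        except (KeyError, SignatureVerifyException):
            continue
    return False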
@@ -497,33 +591,38 @@ class Auth(object):
        return default

    @defer.inlineCallbacks
    def get_user_by_req(self, request, allow_guest=False):
    def get_user_by_req(self, request, allow_guest=False, rights="access"):
        """ Get a registered user's ID.

        Args:
            request - An HTTP request with an access_token query parameter.
        Returns:
            tuple of:
                UserID (str)
                Access token ID (str)
            defer.Deferred: resolves to a ``synapse.types.Requester`` object
        Raises:
            AuthError if no user by that token exists or the token is invalid.
        """
        # Can optionally look elsewhere in the request (e.g. headers)
        try:
            user_id = yield self._get_appservice_user_id(request.args)
            user_id, app_service = yield self._get_appservice_user_id(request)
            if user_id:
                request.authenticated_entity = user_id
                defer.returnValue(
                    Requester(UserID.from_string(user_id), "", False)
                    synapse.types.create_requester(user_id, app_service=app_service)
                )

            access_token = request.args["access_token"][0]
            user_info = yield self._get_user_by_access_token(access_token)
            access_token = get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )

            user_info = yield self.get_user_by_access_token(access_token, rights)
            user = user_info["user"]
            token_id = user_info["token_id"]
            is_guest = user_info["is_guest"]

            # device_id may not be present if get_user_by_access_token has been
            # stubbed out.
            device_id = user_info.get("device_id")

            ip_addr = self.hs.get_ip_from_request(request)
            user_agent = request.requestHeaders.getRawHeaders(
                "User-Agent",
@@ -535,7 +634,8 @@ class Auth(object):
                    user=user,
                    access_token=access_token,
                    ip=ip_addr,
                    user_agent=user_agent
                    user_agent=user_agent,
                    device_id=device_id,
                )

            if is_guest and not allow_guest:
@@ -545,7 +645,9 @@ class Auth(object):

            request.authenticated_entity = user.to_string()

            defer.returnValue(Requester(user, token_id, is_guest))
            defer.returnValue(synapse.types.create_requester(
                user, token_id, is_guest, device_id, app_service=app_service)
            )
        except KeyError:
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
@@ -553,19 +655,21 @@ class Auth(object):
            )
    @defer.inlineCallbacks
    def _get_appservice_user_id(self, request_args):
        app_service = yield self.store.get_app_service_by_token(
            request_args["access_token"][0]
    def _get_appservice_user_id(self, request):
        app_service = self.store.get_app_service_by_token(
            get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
        )
        if app_service is None:
            defer.returnValue(None)
            defer.returnValue((None, None))

        if "user_id" not in request_args:
            defer.returnValue(app_service.sender)
        if "user_id" not in request.args:
            defer.returnValue((app_service.sender, app_service))

        user_id = request_args["user_id"][0]
        user_id = request.args["user_id"][0]
        if app_service.sender == user_id:
            defer.returnValue(app_service.sender)
            defer.returnValue((app_service.sender, app_service))

        if not app_service.is_interested_in_user(user_id):
            raise AuthError(
@@ -577,10 +681,10 @@ class Auth(object):
                403,
                "Application service has not registered this user"
            )
        defer.returnValue(user_id)
        defer.returnValue((user_id, app_service))

    @defer.inlineCallbacks
    def _get_user_by_access_token(self, token):
def get_user_by_access_token(self, token, rights="access"):
|
||||
""" Get a registered user's ID.
|
||||
|
||||
Args:
|
||||
@@ -591,46 +695,61 @@ class Auth(object):
|
||||
AuthError if no user by that token exists or the token is invalid.
|
||||
"""
|
||||
try:
|
||||
            ret = yield self.get_user_from_macaroon(token)
            ret = yield self.get_user_from_macaroon(token, rights)
        except AuthError:
            # TODO(daniel): Remove this fallback when all existing access tokens
            # have been re-issued as macaroons.
            if self.hs.config.expire_access_token:
                raise
            ret = yield self._look_up_user_by_access_token(token)

        defer.returnValue(ret)

    @defer.inlineCallbacks
    def get_user_from_macaroon(self, macaroon_str):
    def get_user_from_macaroon(self, macaroon_str, rights="access"):
        try:
            macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
            self.validate_macaroon(macaroon, "access", False)

            user_prefix = "user_id = "
            user = None
            user_id = self.get_user_id_from_macaroon(macaroon)
            user = UserID.from_string(user_id)

            self.validate_macaroon(
                macaroon, rights, self.hs.config.expire_access_token,
                user_id=user_id,
            )

            guest = False
            for caveat in macaroon.caveats:
                if caveat.caveat_id.startswith(user_prefix):
                    user = UserID.from_string(caveat.caveat_id[len(user_prefix):])
                elif caveat.caveat_id == "guest = true":
                if caveat.caveat_id == "guest = true":
                    guest = True

            if user is None:
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
                    errcode=Codes.UNKNOWN_TOKEN
                )

            if guest:
                ret = {
                    "user": user,
                    "is_guest": True,
                    "token_id": None,
                    "device_id": None,
                }
            elif rights == "delete_pusher":
                # We don't store these tokens in the database
                ret = {
                    "user": user,
                    "is_guest": False,
                    "token_id": None,
                    "device_id": None,
                }
            else:
                # This codepath exists so that we can actually return a
                # token ID, because we use token IDs in place of device
                # identifiers throughout the codebase.
                # TODO(daniel): Remove this fallback when device IDs are
                # properly implemented.
                # This codepath exists for several reasons:
                #   * so that we can actually return a token ID, which is used
                #     in some parts of the schema (where we probably ought to
                #     use device IDs instead)
                #   * the only way we currently have to invalidate an
                #     access_token is by removing it from the database, so we
                #     have to check here that it is still in the db
                #   * some attributes (notably device_id) aren't stored in the
                #     macaroon. They probably should be.
                # TODO: build the dictionary from the macaroon once the
                #   above are fixed
                ret = yield self._look_up_user_by_access_token(macaroon_str)
                if ret["user"] != user:
                    logger.error(
@@ -650,21 +769,46 @@ class Auth(object):
                    errcode=Codes.UNKNOWN_TOKEN
                )

    def validate_macaroon(self, macaroon, type_string, verify_expiry):
    def get_user_id_from_macaroon(self, macaroon):
        """Retrieve the user_id given by the caveats on the macaroon.

        Does *not* validate the macaroon.

        Args:
            macaroon (pymacaroons.Macaroon): The macaroon to extract the user_id from

        Returns:
            (str) user id

        Raises:
            AuthError if there is no user_id caveat in the macaroon
        """
        user_prefix = "user_id = "
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith(user_prefix):
                return caveat.caveat_id[len(user_prefix):]
        raise AuthError(
            self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
            errcode=Codes.UNKNOWN_TOKEN
        )

    def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
        """
        validate that a Macaroon is understood by and was signed by this server.

        Args:
            macaroon(pymacaroons.Macaroon): The macaroon to validate
            type_string(str): The kind of token this is (e.g. "access", "refresh")
            type_string(str): The kind of token required (e.g. "access", "refresh",
                "delete_pusher")
            verify_expiry(bool): Whether to verify whether the macaroon has expired.
                This should really always be True, but no clients currently implement
                token refresh, so we can't enforce expiry yet.
            user_id (str): The user_id required
        """
        v = pymacaroons.Verifier()
        v.satisfy_exact("gen = 1")
        v.satisfy_exact("type = " + type_string)
        v.satisfy_general(lambda c: c.startswith("user_id = "))
        v.satisfy_exact("user_id = %s" % user_id)
        v.satisfy_exact("guest = true")
        if verify_expiry:
            v.satisfy_general(self._verify_expiry)
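
The verifier above only declares which caveats are acceptable; the signature check happens when it is run against the server's secret key. A minimal, self-contained sketch of the full round trip with pymacaroons — the key, identifier, and expiry scheme here are illustrative stand-ins, not Synapse's actual configuration:

    import time
    import pymacaroons

    SECRET_KEY = "homeserver-macaroon-secret"  # hypothetical signing key

    def mint_access_macaroon(server_name, user_id, expiry_ms):
        # First-party caveats mirror what the verifier above satisfies.
        m = pymacaroons.Macaroon(
            location=server_name, identifier="key", key=SECRET_KEY,
        )
        m.add_first_party_caveat("gen = 1")
        m.add_first_party_caveat("type = access")
        m.add_first_party_caveat("user_id = %s" % (user_id,))
        m.add_first_party_caveat("time < %d" % (expiry_ms,))
        return m.serialize()

    def check_access_macaroon(serialized, user_id):
        m = pymacaroons.Macaroon.deserialize(serialized)
        v = pymacaroons.Verifier()
        v.satisfy_exact("gen = 1")
        v.satisfy_exact("type = access")
        v.satisfy_exact("user_id = %s" % (user_id,))
        v.satisfy_general(
            lambda c: c.startswith("time < ")
            and int(c[len("time < "):]) > time.time() * 1000
        )
        v.verify(m, SECRET_KEY)  # raises a pymacaroons exception on failure

    token = mint_access_macaroon("example.com", "@alice:example.com",
                                 int(time.time() * 1000) + 3600 * 1000)
    check_access_macaroon(token, "@alice:example.com")
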
@@ -703,18 +847,23 @@ class Auth(object):
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
                errcode=Codes.UNKNOWN_TOKEN
            )
        # we use ret.get() below because *lots* of unit tests stub out
        # get_user_by_access_token in a way where it only returns a couple of
        # the fields.
        user_info = {
            "user": UserID.from_string(ret.get("name")),
            "token_id": ret.get("token_id", None),
            "is_guest": False,
            "device_id": ret.get("device_id"),
        }
        defer.returnValue(user_info)

    @defer.inlineCallbacks
    def get_appservice_by_req(self, request):
        try:
            token = request.args["access_token"][0]
            service = yield self.store.get_app_service_by_token(token)
            token = get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
            service = self.store.get_app_service_by_token(token)
            if not service:
                logger.warn("Unrecognised appservice access token: %s" % (token,))
                raise AuthError(
@@ -723,7 +872,7 @@ class Auth(object):
                    errcode=Codes.UNKNOWN_TOKEN
                )
            request.authenticated_entity = service.sender
            defer.returnValue(service)
            return defer.succeed(service)
        except KeyError:
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
@@ -734,7 +883,7 @@ class Auth(object):

    @defer.inlineCallbacks
    def add_auth_events(self, builder, context):
        auth_ids = self.compute_auth_events(builder, context.current_state)
        auth_ids = yield self.compute_auth_events(builder, context.prev_state_ids)

        auth_events_entries = yield self.store.add_event_hashes(
            auth_ids
@@ -742,30 +891,32 @@ class Auth(object):

        builder.auth_events = auth_events_entries

    def compute_auth_events(self, event, current_state):
    @defer.inlineCallbacks
    def compute_auth_events(self, event, current_state_ids, for_verification=False):
        if event.type == EventTypes.Create:
            return []
            defer.returnValue([])

        auth_ids = []

        key = (EventTypes.PowerLevels, "", )
        power_level_event = current_state.get(key)
        power_level_event_id = current_state_ids.get(key)

        if power_level_event:
            auth_ids.append(power_level_event.event_id)
        if power_level_event_id:
            auth_ids.append(power_level_event_id)

        key = (EventTypes.JoinRules, "", )
        join_rule_event = current_state.get(key)
        join_rule_event_id = current_state_ids.get(key)

        key = (EventTypes.Member, event.user_id, )
        member_event = current_state.get(key)
        member_event_id = current_state_ids.get(key)

        key = (EventTypes.Create, "", )
        create_event = current_state.get(key)
        if create_event:
            auth_ids.append(create_event.event_id)
        create_event_id = current_state_ids.get(key)
        if create_event_id:
            auth_ids.append(create_event_id)

        if join_rule_event:
        if join_rule_event_id:
            join_rule_event = yield self.store.get_event(join_rule_event_id)
            join_rule = join_rule_event.content.get("join_rule")
            is_public = join_rule == JoinRules.PUBLIC if join_rule else False
        else:
@@ -774,15 +925,21 @@ class Auth(object):
        if event.type == EventTypes.Member:
            e_type = event.content["membership"]
            if e_type in [Membership.JOIN, Membership.INVITE]:
                if join_rule_event:
                    auth_ids.append(join_rule_event.event_id)
                if join_rule_event_id:
                    auth_ids.append(join_rule_event_id)

            if e_type == Membership.JOIN:
                if member_event and not is_public:
                    auth_ids.append(member_event.event_id)
                if member_event_id and not is_public:
                    auth_ids.append(member_event_id)
            else:
                if member_event:
                    auth_ids.append(member_event.event_id)
                if member_event_id:
                    auth_ids.append(member_event_id)

                if for_verification:
                    key = (EventTypes.Member, event.state_key, )
                    existing_event_id = current_state_ids.get(key)
                    if existing_event_id:
                        auth_ids.append(existing_event_id)

            if e_type == Membership.INVITE:
                if "third_party_invite" in event.content:
@@ -790,26 +947,26 @@ class Auth(object):
                        EventTypes.ThirdPartyInvite,
                        event.content["third_party_invite"]["signed"]["token"]
                    )
                    third_party_invite = current_state.get(key)
                    if third_party_invite:
                        auth_ids.append(third_party_invite.event_id)
                elif member_event:
                    third_party_invite_id = current_state_ids.get(key)
                    if third_party_invite_id:
                        auth_ids.append(third_party_invite_id)
                elif member_event_id:
                    member_event = yield self.store.get_event(member_event_id)
                    if member_event.content["membership"] == Membership.JOIN:
                        auth_ids.append(member_event.event_id)

        return auth_ids
        defer.returnValue(auth_ids)
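
A toy illustration of the shape compute_auth_events now consumes — a map from (event type, state key) pairs to event IDs rather than to full events; every ID below is invented for the example:

    # State map, as in context.prev_state_ids after this change.
    current_state_ids = {
        ("m.room.create", ""): "$create:example.com",
        ("m.room.power_levels", ""): "$power:example.com",
        ("m.room.join_rules", ""): "$join:example.com",
        ("m.room.member", "@alice:example.com"): "$alice:example.com",
    }

    # For an ordinary message from @alice, the create event and the power
    # levels are always candidates, plus the sender's own membership; the
    # join rules only enter the picture for m.room.member events.
    auth_ids = [
        current_state_ids[("m.room.create", "")],
        current_state_ids[("m.room.power_levels", "")],
        current_state_ids[("m.room.member", "@alice:example.com")],
    ]
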

    @log_function
    def _can_send_event(self, event, auth_events):
    def _get_send_level(self, etype, state_key, auth_events):
        key = (EventTypes.PowerLevels, "", )
        send_level_event = auth_events.get(key)
        send_level = None
        if send_level_event:
            send_level = send_level_event.content.get("events", {}).get(
                event.type
                etype
            )
            if send_level is None:
                if hasattr(event, "state_key"):
                if state_key is not None:
                    send_level = send_level_event.content.get(
                        "state_default", 50
                    )
@@ -823,6 +980,13 @@ class Auth(object):
        else:
            send_level = 0

        return send_level

    @log_function
    def _can_send_event(self, event, auth_events):
        send_level = self._get_send_level(
            event.type, event.get("state_key", None), auth_events
        )
        user_level = self._get_user_power_level(event.user_id, auth_events)

        if user_level < send_level:
@@ -840,16 +1004,6 @@ class Auth(object):
                    403,
                    "You are not allowed to set others state"
                )
            else:
                sender_domain = UserID.from_string(
                    event.user_id
                ).domain

                if sender_domain != event.state_key:
                    raise AuthError(
                        403,
                        "You are not allowed to set others state"
                    )

        return True

@@ -873,8 +1027,8 @@ class Auth(object):
        if user_level >= redact_level:
            return False

        redacter_domain = EventID.from_string(event.event_id).domain
        redactee_domain = EventID.from_string(event.redacts).domain
        redacter_domain = get_domain_from_id(event.event_id)
        redactee_domain = get_domain_from_id(event.redacts)
        if redacter_domain == redactee_domain:
            return True
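
A hedged restatement of the lookup order _get_send_level performs over a power-levels event's content, convenient for quick experiments (plain dicts stand in for the event; the events_default branch is elided by the hunk above but follows the Matrix spec's defaults):

    def get_send_level(etype, state_key, power_levels_content):
        # Per-event-type override first, then state_default / events_default.
        send_level = power_levels_content.get("events", {}).get(etype)
        if send_level is None:
            if state_key is not None:
                send_level = power_levels_content.get("state_default", 50)
            else:
                send_level = power_levels_content.get("events_default", 0)
        return int(send_level)

    content = {"events": {"m.room.name": 75}, "state_default": 50, "events_default": 0}
    assert get_send_level("m.room.name", "", content) == 75
    assert get_send_level("m.room.topic", "", content) == 50  # state event fallback
    assert get_send_level("m.room.message", None, content) == 0
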

@@ -967,3 +1121,108 @@ class Auth(object):
                "You don't have permission to add ops level greater "
                "than your own"
            )

    @defer.inlineCallbacks
    def check_can_change_room_list(self, room_id, user):
        """Check if the user is allowed to edit the room's entry in the
        published room list.

        Args:
            room_id (str)
            user (UserID)
        """

        is_admin = yield self.is_server_admin(user)
        if is_admin:
            defer.returnValue(True)

        user_id = user.to_string()
        yield self.check_joined_room(room_id, user_id)

        # We currently require the user is a "moderator" in the room. We do this
        # by checking if they would (theoretically) be able to change the
        # m.room.aliases events
        power_level_event = yield self.state.get_current_state(
            room_id, EventTypes.PowerLevels, ""
        )

        auth_events = {}
        if power_level_event:
            auth_events[(EventTypes.PowerLevels, "")] = power_level_event

        send_level = self._get_send_level(
            EventTypes.Aliases, "", auth_events
        )
        user_level = self._get_user_power_level(user_id, auth_events)

        if user_level < send_level:
            raise AuthError(
                403,
                "This server requires you to be a moderator in the room to"
                " edit its room list entry"
            )


def has_access_token(request):
    """Checks if the request has an access_token.

    Returns:
        bool: False if no access_token was given, True otherwise.
    """
    query_params = request.args.get("access_token")
    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
    return bool(query_params) or bool(auth_headers)


def get_access_token_from_request(request, token_not_found_http_status=401):
    """Extracts the access_token from the request.

    Args:
        request: The http request.
        token_not_found_http_status(int): The HTTP status code to set in the
            AuthError if the token isn't found. This is used in some of the
            legacy APIs to change the status code to 403 from the default of
            401 since some of the old clients depended on auth errors returning
            403.
    Returns:
        str: The access_token
    Raises:
        AuthError: If there isn't an access_token in the request.
    """

    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
    query_params = request.args.get("access_token")
    if auth_headers:
        # Try to get the access_token from an "Authorization: Bearer"
        # header
        if query_params is not None:
            raise AuthError(
                token_not_found_http_status,
                "Mixing Authorization headers and access_token query parameters.",
                errcode=Codes.MISSING_TOKEN,
            )
        if len(auth_headers) > 1:
            raise AuthError(
                token_not_found_http_status,
                "Too many Authorization headers.",
                errcode=Codes.MISSING_TOKEN,
            )
        parts = auth_headers[0].split(" ")
        if parts[0] == "Bearer" and len(parts) == 2:
            return parts[1]
        else:
            raise AuthError(
                token_not_found_http_status,
                "Invalid Authorization header.",
                errcode=Codes.MISSING_TOKEN,
            )
    else:
        # Try to get the access_token from the query params.
        if not query_params:
            raise AuthError(
                token_not_found_http_status,
                "Missing access token.",
                errcode=Codes.MISSING_TOKEN
            )

        return query_params[0]
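
A small, hypothetical harness showing the two ways a token can reach get_access_token_from_request — nothing here beyond the function itself is Synapse API; the stub request mimics only the attributes the function touches:

    from synapse.api.auth import get_access_token_from_request

    class FakeHeaders(object):
        def __init__(self, headers):
            self._headers = headers

        def getRawHeaders(self, name):
            return self._headers.get(name)

    class FakeRequest(object):
        def __init__(self, args=None, headers=None):
            self.args = args or {}
            self.requestHeaders = FakeHeaders(headers or {})

    # Token in the query string, as legacy clients send it:
    req = FakeRequest(args={"access_token": ["syt_example"]})
    assert get_access_token_from_request(req) == "syt_example"

    # Token in an Authorization header, the preferred form:
    req = FakeRequest(headers={"Authorization": ["Bearer syt_example"]})
    assert get_access_token_from_request(req) == "syt_example"
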

@@ -32,7 +32,6 @@ class PresenceState(object):
    OFFLINE = u"offline"
    UNAVAILABLE = u"unavailable"
    ONLINE = u"online"
    FREE_FOR_CHAT = u"free_for_chat"


class JoinRules(object):
@@ -86,3 +85,8 @@ class RoomCreationPreset(object):
    PRIVATE_CHAT = "private_chat"
    PUBLIC_CHAT = "public_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"


class ThirdPartyEntityKind(object):
    USER = "user"
    LOCATION = "location"

@@ -42,8 +42,10 @@ class Codes(object):
    TOO_LARGE = "M_TOO_LARGE"
    EXCLUSIVE = "M_EXCLUSIVE"
    THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
    THREEPID_IN_USE = "THREEPID_IN_USE"
    THREEPID_IN_USE = "M_THREEPID_IN_USE"
    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
    INVALID_USERNAME = "M_INVALID_USERNAME"
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


class CodeMessageException(RuntimeError):
@@ -15,6 +15,8 @@
from synapse.api.errors import SynapseError
from synapse.types import UserID, RoomID

from twisted.internet import defer

import ujson as json


@@ -24,10 +26,10 @@ class Filtering(object):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

    @defer.inlineCallbacks
    def get_user_filter(self, user_localpart, filter_id):
        result = self.store.get_user_filter(user_localpart, filter_id)
        result.addCallback(FilterCollection)
        return result
        result = yield self.store.get_user_filter(user_localpart, filter_id)
        defer.returnValue(FilterCollection(result))

    def add_user_filter(self, user_localpart, user_filter):
        self.check_valid_filter(user_filter)
@@ -189,6 +191,17 @@ class Filter(object):
    def __init__(self, filter_json):
        self.filter_json = filter_json

        self.types = self.filter_json.get("types", None)
        self.not_types = self.filter_json.get("not_types", [])

        self.rooms = self.filter_json.get("rooms", None)
        self.not_rooms = self.filter_json.get("not_rooms", [])

        self.senders = self.filter_json.get("senders", None)
        self.not_senders = self.filter_json.get("not_senders", [])

        self.contains_url = self.filter_json.get("contains_url", None)

    def check(self, event):
        """Checks whether the filter matches the given event.

@@ -198,15 +211,19 @@ class Filter(object):
        sender = event.get("sender", None)
        if not sender:
            # Presence events have their 'sender' in content.user_id
            sender = event.get("content", {}).get("user_id", None)
            content = event.get("content")
            # account_data has been allowed to have non-dict content, so check type first
            if isinstance(content, dict):
                sender = content.get("user_id")

        return self.check_fields(
            event.get("room_id", None),
            sender,
            event.get("type", None),
            "url" in event.get("content", {})
        )

    def check_fields(self, room_id, sender, event_type):
    def check_fields(self, room_id, sender, event_type, contains_url):
        """Checks whether the filter matches the given event fields.

        Returns:
@@ -220,15 +237,20 @@ class Filter(object):

        for name, match_func in literal_keys.items():
            not_name = "not_%s" % (name,)
            disallowed_values = self.filter_json.get(not_name, [])
            disallowed_values = getattr(self, not_name)
            if any(map(match_func, disallowed_values)):
                return False

            allowed_values = self.filter_json.get(name, None)
            allowed_values = getattr(self, name)
            if allowed_values is not None:
                if not any(map(match_func, allowed_values)):
                    return False

        contains_url_filter = self.filter_json.get("contains_url")
        if contains_url_filter is not None:
            if contains_url_filter != contains_url:
                return False

        return True
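
For instance, a filter that keeps only messages carrying a URL could be exercised like this — a sketch assuming synapse.api.filtering is importable; the room IDs and mxc URI are made up:

    from synapse.api.filtering import Filter

    f = Filter({
        "types": ["m.room.message"],
        "not_rooms": ["!spam:example.com"],
        "contains_url": True,
    })

    event = {
        "type": "m.room.message",
        "room_id": "!chat:example.com",
        "sender": "@alice:example.com",
        "content": {"body": "see this", "url": "mxc://example.com/abc"},
    }
    assert f.check(event)

    # Without a "url" key in content, the contains_url clause rejects it.
    event["content"].pop("url")
    assert not f.check(event)
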

    def filter_rooms(self, room_ids):

@@ -23,7 +23,7 @@ class Ratelimiter(object):
    def __init__(self):
        self.message_counts = collections.OrderedDict()

    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
        """Can the user send a message?
        Args:
            user_id: The user sending a message.
@@ -32,12 +32,15 @@ class Ratelimiter(object):
                second.
            burst_count: How many messages the user can send before being
                limited.
            update (bool): Whether to update the message rates or not. This is
                useful to check if a message would be allowed to be sent before
                it's ready to be actually sent.
        Returns:
            A pair of a bool indicating if they can send a message now and a
            time in seconds of when they can next send a message.
        """
        self.prune_message_counts(time_now_s)
        message_count, time_start, _ignored = self.message_counts.pop(
        message_count, time_start, _ignored = self.message_counts.get(
            user_id, (0., time_now_s, None),
        )
        time_delta = time_now_s - time_start
@@ -52,9 +55,10 @@ class Ratelimiter(object):
            allowed = True
            message_count += 1

        self.message_counts[user_id] = (
            message_count, time_start, msg_rate_hz
        )
        if update:
            self.message_counts[user_id] = (
                message_count, time_start, msg_rate_hz
            )
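
The new update flag lets callers probe the limiter without charging the quota — a sketch of the intended calling pattern (the user ID and rates here are arbitrary):

    import time

    from synapse.api.ratelimiting import Ratelimiter

    limiter = Ratelimiter()

    # Probe only: would a message be allowed right now? Counts stay untouched.
    allowed, next_allowed = limiter.send_message(
        "@alice:example.com", time.time(), msg_rate_hz=0.2, burst_count=10.0,
        update=False,
    )

    if allowed:
        # ... actually build and persist the event, then charge the quota:
        limiter.send_message(
            "@alice:example.com", time.time(), msg_rate_hz=0.2, burst_count=10.0,
        )
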

        if msg_rate_hz > 0:
            time_allowed = (

@@ -25,4 +25,3 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"

@@ -16,13 +16,11 @@
import sys
sys.dont_write_bytecode = True

from synapse.python_dependencies import (
    check_requirements, MissingRequirementError
)  # NOQA
from synapse import python_dependencies  # noqa: E402

try:
    check_requirements()
except MissingRequirementError as e:
    python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
    message = "\n".join([
        "Missing Requirement: %s" % (e.message,),
        "To install run:",
214  synapse/app/appservice.py  Normal file
@@ -0,0 +1,214 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.storage.engines import create_engine
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.appservice")


class AppserviceSlaveStore(
    DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
    SlavedRegistrationStore,
):
    pass


class AppserviceServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse appservice now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        appservice_handler = self.get_application_service_handler()

        @defer.inlineCallbacks
        def replicate(results):
            stream = results.get("events")
            if stream:
                max_stream_id = stream["position"]
                yield appservice_handler.notify_interested_services(max_stream_id)

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                replicate(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse appservice", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.appservice"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.notify_appservices:
        sys.stderr.write(
            "\nThe appservices must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``notify_appservices: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.notify_appservices = True

    ps = AppserviceServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.replicate()
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-appservice",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
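
The worker entry points added in this changeset all share the same long-poll shape in their replicate() methods. A condensed restatement of that loop, with the HTTP client, store, and URL passed in as stand-ins — a sketch, not the workers' actual factoring:

    from twisted.internet import defer

    from synapse.util.async import sleep

    @defer.inlineCallbacks
    def replication_loop(http_client, store, replication_url, on_result=None):
        # Long-poll the master's replication resource: send our current
        # stream positions, apply whatever comes back, repeat forever.
        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                if on_result is not None:
                    on_result(result)
            except Exception:
                # Back off rather than spinning when the master is unreachable.
                yield sleep(5)
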

220  synapse/app/client_reader.py  Normal file
@@ -0,0 +1,220 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.client_reader")


class ClientReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    pass


class ClientReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    PublicRoomListRestServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse client reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.client_reader"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()
        ss.replicate()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-client-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
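
The ordering comment on ClientIpStore is about Python's method resolution order: bases listed earlier win attribute lookups, and cooperative constructors run in MRO order, so a mixin with a different constructor has to sit in the right slot. A generic illustration, with no Synapse classes involved:

    class A(object):
        def __init__(self):
            self.order = ["A"]

    class B(A):
        def __init__(self):
            super(B, self).__init__()
            self.order.append("B")

    class C(A):
        def __init__(self):
            super(C, self).__init__()
            self.order.append("C")

    class D(B, C):
        pass

    print(D().order)                          # ['A', 'C', 'B']
    print([k.__name__ for k in D.__mro__])    # ['D', 'B', 'C', 'A', 'object']
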

211  synapse/app/federation_reader.py  Normal file
@@ -0,0 +1,211 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import FEDERATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    TransactionStore,
    BaseSlavedStore,
):
    pass


class FederationReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse federation reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_reader"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()
        ss.replicate()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-federation-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])

@@ -16,14 +16,10 @@

import synapse

import contextlib
import gc
import logging
import os
import re
import resource
import subprocess
import sys
import time
from synapse.config._base import ConfigError

from synapse.python_dependencies import (
@@ -33,22 +29,15 @@ from synapse.python_dependencies import (
from synapse.rest import ClientRestResource
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import are_all_users_on_domain
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database

from synapse.server import HomeServer


from twisted.conch.manhole import ColoredManhole
from twisted.conch.insults import insults
from twisted.conch import manhole_ssh
from twisted.cred import checkers, portal


from twisted.internet import reactor, task, defer
from twisted.application import service
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.web.static import File
from twisted.web.server import Site, GzipEncoderFactory, Request
from twisted.web.server import GzipEncoderFactory
from synapse.http.server import RootRedirect
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
@@ -62,9 +51,18 @@ from synapse.api.urls import (
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext
from synapse.metrics import register_memory_metrics, get_metrics_for
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer

from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole

from synapse.http.site import SynapseSite

from synapse import events

from daemonize import Daemonize
@@ -72,9 +70,6 @@ from daemonize import Daemonize
logger = logging.getLogger("synapse.app.homeserver")


ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')


def gz_wrap(r):
    return EncodingResourceWrapper(r, [GzipEncoderFactory()])

@@ -153,7 +148,7 @@ class SynapseHomeServer(HomeServer):
            MEDIA_PREFIX: media_repo,
            LEGACY_MEDIA_PREFIX: media_repo,
            CONTENT_REPO_PREFIX: ContentRepoResource(
                self, self.config.uploads_path, self.auth, self.content_addr
                self, self.config.uploads_path
            ),
        })

@@ -169,7 +164,15 @@ class SynapseHomeServer(HomeServer):
                if name == "metrics" and self.get_config().enable_metrics:
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources)
                if name == "replication":
                    resources[REPLICATION_PREFIX] = ReplicationResource(self)

        if WEB_CLIENT_PREFIX in resources:
            root_resource = RootRedirect(WEB_CLIENT_PREFIX)
        else:
            root_resource = Resource()

        root_resource = create_resource_tree(resources, root_resource)
        if tls:
            reactor.listenSSL(
                port,
@@ -202,24 +205,13 @@ class SynapseHomeServer(HomeServer):
            if listener["type"] == "http":
                self._listener_http(config, listener)
            elif listener["type"] == "manhole":
                checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
                    matrix="rabbithole"
                )

                rlm = manhole_ssh.TerminalRealm()
                rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
                    ColoredManhole,
                    {
                        "__name__": "__console__",
                        "hs": self,
                    }
                )

                f = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))

                reactor.listenTCP(
                    listener["port"],
                    f,
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
@@ -241,7 +233,7 @@ class SynapseHomeServer(HomeServer):
        except IncorrectDatabaseSetup as e:
            quit_with_error(e.message)

    def get_db_conn(self):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
@@ -250,7 +242,8 @@ class SynapseHomeServer(HomeServer):
        }
        db_conn = self.database_engine.module.connect(**db_params)

        self.database_engine.on_new_connection(db_conn)
        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn


@@ -264,86 +257,6 @@ def quit_with_error(error_string):
    sys.exit(1)


def get_version_string():
    try:
        null = open(os.devnull, 'w')
        cwd = os.path.dirname(os.path.abspath(__file__))
        try:
            git_branch = subprocess.check_output(
                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                stderr=null,
                cwd=cwd,
            ).strip()
            git_branch = "b=" + git_branch
        except subprocess.CalledProcessError:
            git_branch = ""

        try:
            git_tag = subprocess.check_output(
                ['git', 'describe', '--exact-match'],
                stderr=null,
                cwd=cwd,
            ).strip()
            git_tag = "t=" + git_tag
        except subprocess.CalledProcessError:
            git_tag = ""

        try:
            git_commit = subprocess.check_output(
                ['git', 'rev-parse', '--short', 'HEAD'],
                stderr=null,
                cwd=cwd,
            ).strip()
        except subprocess.CalledProcessError:
            git_commit = ""

        try:
            dirty_string = "-this_is_a_dirty_checkout"
            is_dirty = subprocess.check_output(
                ['git', 'describe', '--dirty=' + dirty_string],
                stderr=null,
                cwd=cwd,
            ).strip().endswith(dirty_string)

            git_dirty = "dirty" if is_dirty else ""
        except subprocess.CalledProcessError:
            git_dirty = ""

        if git_branch or git_tag or git_commit or git_dirty:
            git_version = ",".join(
                s for s in
                (git_branch, git_tag, git_commit, git_dirty,)
                if s
            )

            return (
                "Synapse/%s (%s)" % (
                    synapse.__version__, git_version,
                )
            ).encode("ascii")
    except Exception as e:
        logger.info("Failed to check for git repository: %s", e)

    return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")


def change_resource_limit(soft_file_no):
    try:
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

        if not soft_file_no:
            soft_file_no = hard

        resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
        logger.info("Set file limit to: %d", soft_file_no)

        resource.setrlimit(
            resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
        )
    except (ValueError, resource.error) as e:
        logger.warn("Failed to set file or core limit: %s", e)


def setup(config_options):
    """
    Args:
@@ -354,10 +267,9 @@ def setup(config_options):
        HomeServer
    """
    try:
        config = HomeServerConfig.load_config(
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
            generate_section="Homeserver"
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
@@ -373,7 +285,7 @@ def setup(config_options):
    # check any extra requirements we have now we have a config
    check_requirements(config)

    version_string = get_version_string()
    version_string = "Synapse/" + get_version_string(synapse)

    logger.info("Server hostname: %s", config.server_name)
    logger.info("Server version: %s", version_string)
@@ -382,7 +294,7 @@ def setup(config_options):

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    database_engine = create_engine(config.database_config["name"])
    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
@@ -390,7 +302,6 @@ def setup(config_options):
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        content_addr=config.content_addr,
        version_string=version_string,
        database_engine=database_engine,
    )
@@ -398,8 +309,10 @@ def setup(config_options):
    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        db_conn = hs.get_db_conn()
        database_engine.prepare_database(db_conn)
        db_conn = hs.get_db_conn(run_new_connection=False)
        prepare_database(db_conn, database_engine, config=config)
        database_engine.on_new_connection(db_conn)

        hs.run_startup_checks(db_conn, database_engine)

        db_conn.commit()
@@ -423,6 +336,8 @@ def setup(config_options):
        hs.get_datastore().start_doing_background_updates()
        hs.get_replication_layer().start_get_pdu_cache()

        register_memory_metrics(hs)

    reactor.callWhenRunning(start)

    return hs
@@ -438,215 +353,13 @@ class SynapseService(service.Service):
    def startService(self):
        hs = setup(self.config)
        change_resource_limit(hs.config.soft_file_limit)
        if hs.config.gc_thresholds:
            gc.set_threshold(*hs.config.gc_thresholds)

    def stopService(self):
        return self._port.stopListening()


class SynapseRequest(Request):
    def __init__(self, site, *args, **kw):
        Request.__init__(self, *args, **kw)
        self.site = site
        self.authenticated_entity = None
        self.start_time = 0

    def __repr__(self):
        # We overwrite this so that we don't log ``access_token``
        return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % (
            self.__class__.__name__,
            id(self),
            self.method,
            self.get_redacted_uri(),
            self.clientproto,
            self.site.site_tag,
        )

    def get_redacted_uri(self):
        return ACCESS_TOKEN_RE.sub(
            r'\1<redacted>\3',
            self.uri
        )
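
The redaction regex near the top of this file can be checked in isolation; the URI below is invented:

    import re

    ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')

    uri = "/_matrix/client/r0/sync?timeout=30000&access_token=syt_secret&since=s72"
    print(ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri))
    # /_matrix/client/r0/sync?timeout=30000&access_token=<redacted>&since=s72
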
|
||||
|
||||
def get_user_agent(self):
|
||||
return self.requestHeaders.getRawHeaders("User-Agent", [None])[-1]
|
||||
|
||||
def started_processing(self):
|
||||
self.site.access_logger.info(
|
||||
"%s - %s - Received request: %s %s",
|
||||
self.getClientIP(),
|
||||
self.site.site_tag,
|
||||
self.method,
|
||||
self.get_redacted_uri()
|
||||
)
|
||||
self.start_time = int(time.time() * 1000)
|
||||
|
||||
def finished_processing(self):
|
||||
|
||||
try:
|
||||
context = LoggingContext.current_context()
|
||||
ru_utime, ru_stime = context.get_resource_usage()
|
||||
db_txn_count = context.db_txn_count
|
||||
db_txn_duration = context.db_txn_duration
|
||||
except:
|
||||
ru_utime, ru_stime = (0, 0)
|
||||
db_txn_count, db_txn_duration = (0, 0)
|
||||
|
||||
self.site.access_logger.info(
|
||||
"%s - %s - {%s}"
|
||||
" Processed request: %dms (%dms, %dms) (%dms/%d)"
|
||||
" %sB %s \"%s %s %s\" \"%s\"",
|
||||
self.getClientIP(),
|
||||
self.site.site_tag,
|
||||
self.authenticated_entity,
|
||||
int(time.time() * 1000) - self.start_time,
|
||||
int(ru_utime * 1000),
|
||||
int(ru_stime * 1000),
|
||||
int(db_txn_duration * 1000),
|
||||
int(db_txn_count),
|
||||
self.sentLength,
|
||||
self.code,
|
||||
self.method,
|
||||
self.get_redacted_uri(),
|
||||
self.clientproto,
|
||||
self.get_user_agent(),
|
||||
)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def processing(self):
|
||||
self.started_processing()
|
||||
yield
|
||||
self.finished_processing()
|
||||
|
||||
|
||||
class XForwardedForRequest(SynapseRequest):
|
||||
def __init__(self, *args, **kw):
|
||||
SynapseRequest.__init__(self, *args, **kw)
|
||||
|
||||
"""
|
||||
Add a layer on top of another request that only uses the value of an
|
||||
X-Forwarded-For header as the result of C{getClientIP}.
|
||||
"""
|
||||
def getClientIP(self):
|
||||
"""
|
||||
@return: The client address (the first address) in the value of the
|
||||
I{X-Forwarded-For header}. If the header is not present, return
|
||||
C{b"-"}.
|
||||
"""
|
||||
return self.requestHeaders.getRawHeaders(
|
||||
b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()


class SynapseRequestFactory(object):
    def __init__(self, site, x_forwarded_for):
        self.site = site
        self.x_forwarded_for = x_forwarded_for

    def __call__(self, *args, **kwargs):
        if self.x_forwarded_for:
            return XForwardedForRequest(self.site, *args, **kwargs)
        else:
            return SynapseRequest(self.site, *args, **kwargs)


class SynapseSite(Site):
    """
    Subclass of a twisted http Site that does access logging with python's
    standard logging
    """
    def __init__(self, logger_name, site_tag, config, resource, *args, **kwargs):
        Site.__init__(self, resource, *args, **kwargs)

        self.site_tag = site_tag

        proxied = config.get("x_forwarded", False)
        self.requestFactory = SynapseRequestFactory(self, proxied)
        self.access_logger = logging.getLogger(logger_name)

    def log(self, request):
        pass


def create_resource_tree(desired_tree, redirect_root_to_web_client=True):
    """Create the resource tree for this Home Server.

    This is unduly complicated because Twisted does not support putting
    child resources more than 1 level deep at a time.

    Args:
        desired_tree (dict): Mapping from desired paths to Resources.
        redirect_root_to_web_client (bool): True to redirect '/' to the
            location of the web client. This does nothing if the web client
            is not in the desired tree.
    """
    if redirect_root_to_web_client and WEB_CLIENT_PREFIX in desired_tree:
        root_resource = RootRedirect(WEB_CLIENT_PREFIX)
    else:
        root_resource = Resource()

    # ideally we'd just use getChild and putChild but getChild doesn't work
    # unless you give it a Request object IN ADDITION to the name :/ So
    # instead, we'll store a copy of this mapping so we can actually add
    # extra resources to existing nodes. See self._resource_id for the key.
    resource_mappings = {}
    for full_path, res in desired_tree.items():
        logger.info("Attaching %s to path %s", res, full_path)
        last_resource = root_resource
        for path_seg in full_path.split('/')[1:-1]:
            if path_seg not in last_resource.listNames():
                # resource doesn't exist, so make a "dummy resource"
                child_resource = Resource()
                last_resource.putChild(path_seg, child_resource)
                res_id = _resource_id(last_resource, path_seg)
                resource_mappings[res_id] = child_resource
                last_resource = child_resource
            else:
                # we have an existing Resource, use that instead.
                res_id = _resource_id(last_resource, path_seg)
                last_resource = resource_mappings[res_id]

        # ===========================
        # now attach the actual desired resource
        last_path_seg = full_path.split('/')[-1]

        # if there is already a resource here, thieve its children and
        # replace it
        res_id = _resource_id(last_resource, last_path_seg)
        if res_id in resource_mappings:
            # there is a dummy resource at this path already, which needs
            # to be replaced with the desired resource.
            existing_dummy_resource = resource_mappings[res_id]
            for child_name in existing_dummy_resource.listNames():
                child_res_id = _resource_id(
                    existing_dummy_resource, child_name
                )
                child_resource = resource_mappings[child_res_id]
                # steal the children
                res.putChild(child_name, child_resource)

        # finally, insert the desired resource in the right place
        last_resource.putChild(last_path_seg, res)
        res_id = _resource_id(last_resource, last_path_seg)
        resource_mappings[res_id] = res

    return root_resource
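# A minimal illustration of the above (resource names are hypothetical):
#   tree = create_resource_tree({
#       "/_matrix/client/api/v1": client_resource,
#       "/_matrix/media/v1": media_resource,
#   }, redirect_root_to_web_client=False)
# builds shared dummy Resources for "_matrix" and the other intermediate
# segments, then attaches each desired resource at its final path segment.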


def _resource_id(resource, path_seg):
    """Construct an arbitrary resource ID so you can retrieve the mapping
    later.

    If you want to represent resource A putChild resource B with path C,
    the mapping should look like _resource_id(A,C) = B.

    Args:
        resource (Resource): The *parent* Resource
        path_seg (str): The name of the child Resource to be attached.
    Returns:
        str: A unique string which can be a key to the child Resource.
    """
    return "%s-%s" % (resource, path_seg)


def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
@@ -672,6 +385,8 @@ def run(hs):

    start_time = hs.get_clock().time()

    stats = {}

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
@@ -680,7 +395,10 @@ def run(hs):
        if uptime < 0:
            uptime = 0

        stats = {}
        # If the stats dictionary is empty then this is the first time we've
        # reported stats.
        first_time = not stats

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
@@ -693,6 +411,25 @@ def run(hs):
        daily_messages = yield hs.get_datastore().count_daily_messages()
        if daily_messages is not None:
            stats["daily_messages"] = daily_messages
        else:
            stats.pop("daily_messages", None)

        if first_time:
            # Add callbacks to report the synapse stats as metrics whenever
            # prometheus requests them, typically every 30s.
            # As some of the stats are expensive to calculate we only update
            # them when synapse phones home to matrix.org every 24 hours.
            metrics = get_metrics_for("synapse.usage")
            metrics.add_callback("timestamp", lambda: stats["timestamp"])
            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
            metrics.add_callback("total_users", lambda: stats["total_users"])
            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
            metrics.add_callback(
                "daily_active_users", lambda: stats["daily_active_users"]
            )
            metrics.add_callback(
                "daily_messages", lambda: stats.get("daily_messages", 0)
            )

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
@@ -713,12 +450,14 @@ def run(hs):
    # sys.settrace(logcontext_tracer)
    with LoggingContext("run"):
        change_resource_limit(hs.config.soft_file_limit)
        if hs.config.gc_thresholds:
            gc.set_threshold(*hs.config.gc_thresholds)
        reactor.run()

    if hs.config.daemonize:

        if hs.config.print_pidfile:
            print hs.config.pid_file
            print (hs.config.pid_file)

        daemon = Daemonize(
            app="synapse-homeserver",
synapse/app/media_repository.py (new file, 217 lines)
@@ -0,0 +1,217 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.media_repository")


class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
    MediaRepositoryStore,
    ClientIpStore,
):
    pass


class MediaRepositoryServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "media":
                    media_repo = MediaRepositoryResource(self)
                    resources.update({
                        MEDIA_PREFIX: media_repo,
                        LEGACY_MEDIA_PREFIX: media_repo,
                        CONTENT_REPO_PREFIX: ContentRepoResource(
                            self, self.config.uploads_path
                        ),
                    })

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse media repository now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
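                # The manhole above opens an interactive Python shell with the
                # running HomeServer bound as ``hs`` -- handy for live
                # debugging. The credentials are hard-coded, so it should only
                # ever listen on localhost.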
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
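    # Replication is a plain HTTP long-poll against the master; a sketch of
    # the request, with an illustrative URL:
    #   GET http://master.example.org:9092/_synapse/replication?events=<token>&timeout=30000
    # The response carries per-stream rows that process_replication() applies
    # to the slaved stores, advancing the saved stream positions for the next
    # poll.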


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse media repository", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.media_repository"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()
        ss.replicate()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-media-repository",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
303
synapse/app/pusher.py
Normal file
303
synapse/app/pusher.py
Normal file
@@ -0,0 +1,303 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.storage.engines import create_engine
from synapse.storage import DataStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.pusher")


class PusherSlaveStore(
    SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
    SlavedAccountDataStore
):
    update_pusher_last_stream_ordering_and_success = (
        DataStore.update_pusher_last_stream_ordering_and_success.__func__
    )

    update_pusher_failing_since = (
        DataStore.update_pusher_failing_since.__func__
    )

    update_pusher_last_stream_ordering = (
        DataStore.update_pusher_last_stream_ordering.__func__
    )

    get_throttle_params_by_room = (
        DataStore.get_throttle_params_by_room.__func__
    )

    set_throttle_params = (
        DataStore.set_throttle_params.__func__
    )

    get_time_of_last_push_action_before = (
        DataStore.get_time_of_last_push_action_before.__func__
    )

    get_profile_displayname = (
        DataStore.get_profile_displayname.__func__
    )

    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )
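    # ``DataStore.method.__func__`` pulls the underlying function out of the
    # unbound method so it can be re-bound on this slaved store, while the
    # ``RoomMemberStore.__dict__[...]`` lookup grabs the cached descriptor
    # directly, bypassing normal attribute lookup so its cache decoration is
    # preserved.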


class PusherServer(HomeServer):

    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        http_client = self.get_simple_http_client()
        replication_url = self.config.worker_replication_url
        url = replication_url + "/remove_pushers"
        return http_client.post_json_get_json(url, {
            "remove": [{
                "app_id": app_id,
                "push_key": push_key,
                "user_id": user_id,
            }]
        })

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse pusher now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        pusher_pool = self.get_pusherpool()

        def stop_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            pushers_for_user = pusher_pool.pushers.get(user_id, {})
            pusher = pushers_for_user.pop(key, None)
            if pusher is None:
                return
            logger.info("Stopping pusher %r / %r", user_id, key)
            pusher.on_stop()

        def start_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            logger.info("Starting pusher %r / %r", user_id, key)
            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)

        @defer.inlineCallbacks
        def poke_pushers(results):
            pushers_rows = set(
                map(tuple, results.get("pushers", {}).get("rows", []))
            )
            deleted_pushers_rows = set(
                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
            )
            for row in sorted(pushers_rows | deleted_pushers_rows):
                if row in deleted_pushers_rows:
                    user_id, app_id, pushkey = row[1:4]
                    stop_pusher(user_id, app_id, pushkey)
                elif row in pushers_rows:
                    user_id = row[1]
                    app_id = row[5]
                    pushkey = row[8]
                    yield start_pusher(user_id, app_id, pushkey)

            stream = results.get("events")
            if stream and stream["rows"]:
                min_stream_id = stream["rows"][0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_notifications)(
                    min_stream_id, max_stream_id
                )

            stream = results.get("receipts")
            if stream and stream["rows"]:
                rows = stream["rows"]
                affected_room_ids = set(row[1] for row in rows)
                min_stream_id = rows[0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_receipts)(
                    min_stream_id, max_stream_id, affected_room_ids
                )

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                poke_pushers(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse pusher", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.pusher"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    if config.start_pushers:
        sys.stderr.write(
            "\nThe pushers must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``start_pushers: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.start_pushers = True

    database_engine = create_engine(config.database_config)

    ps = PusherServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ps.replicate()
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-pusher",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        ps = start(sys.argv[1:])
synapse/app/synchrotron.py (new file, 496 lines)
@@ -0,0 +1,496 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.api.constants import EventTypes, PresenceState
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import FrozenEvent
from synapse.handlers.presence import PresenceHandler
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.rest.client.v2_alpha import sync
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.presence import PresenceStore, UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import contextlib
import gc
import ujson as json

logger = logging.getLogger("synapse.app.synchrotron")


class SynchrotronSlavedStore(
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedReceiptsStore,
    SlavedAccountDataStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedFilteringStore,
    SlavedPresenceStore,
    SlavedDeviceInboxStore,
    RoomStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )

    # XXX: This is a bit broken because we don't persist the accepted list in a
    # way that can be replicated. This means that we don't have a way to
    # invalidate the cache correctly.
    get_presence_list_accepted = PresenceStore.__dict__[
        "get_presence_list_accepted"
    ]
    get_presence_list_observers_accepted = PresenceStore.__dict__[
        "get_presence_list_observers_accepted"
    ]


UPDATE_SYNCING_USERS_MS = 10 * 1000


class SynchrotronPresence(object):
    def __init__(self, hs):
        self.is_mine_id = hs.is_mine_id
        self.http_client = hs.get_simple_http_client()
        self.store = hs.get_datastore()
        self.user_to_num_current_syncs = {}
        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()

        active_presence = self.store.take_presence_startup_info()
        self.user_to_current_state = {
            state.user_id: state
            for state in active_presence
        }

        self.process_id = random_string(16)
        logger.info("Presence process_id is %r", self.process_id)

        self._sending_sync = False
        self._need_to_send_sync = False
        self.clock.looping_call(
            self._send_syncing_users_regularly,
            UPDATE_SYNCING_USERS_MS,
        )

        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)

    def set_state(self, user, state, ignore_status_msg=False):
        # TODO: How's this supposed to work?
        pass

    get_states = PresenceHandler.get_states.__func__
    get_state = PresenceHandler.get_state.__func__
    _get_interested_parties = PresenceHandler._get_interested_parties.__func__
    current_state_for_users = PresenceHandler.current_state_for_users.__func__

    @defer.inlineCallbacks
    def user_syncing(self, user_id, affect_presence):
        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1
            prev_states = yield self.current_state_for_users([user_id])
            if prev_states[user_id].state == PresenceState.OFFLINE:
                # TODO: Don't block the sync request on this HTTP hit.
                yield self._send_syncing_users_now()

        def _end():
            # We check that the user_id is in user_to_num_current_syncs because
            # user_to_num_current_syncs may have been cleared if we are
            # shutting down.
            if affect_presence and user_id in self.user_to_num_current_syncs:
                self.user_to_num_current_syncs[user_id] -= 1

        @contextlib.contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                _end()

        defer.returnValue(_user_syncing())
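        # Returning a context manager (rather than a bare value) lets the
        # caller write ``with (yield user_syncing(...)):`` so the per-user
        # sync count is decremented however the sync request finishes.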

    @defer.inlineCallbacks
    def _on_shutdown(self):
        # When the synchrotron is shutdown tell the master to clear the in
        # progress syncs for this process
        self.user_to_num_current_syncs.clear()
        yield self._send_syncing_users_now()

    def _send_syncing_users_regularly(self):
        # Only send an update if we aren't in the middle of sending one.
        if not self._sending_sync:
            preserve_fn(self._send_syncing_users_now)()

    @defer.inlineCallbacks
    def _send_syncing_users_now(self):
        if self._sending_sync:
            # We don't want to race with sending another update.
            # Instead we wait for that update to finish and send another
            # update afterwards.
            self._need_to_send_sync = True
            return

        # Flag that we are sending an update.
        self._sending_sync = True

        yield self.http_client.post_json_get_json(self.syncing_users_url, {
            "process_id": self.process_id,
            "syncing_users": [
                user_id for user_id, count in self.user_to_num_current_syncs.items()
                if count > 0
            ],
        })

        # Unset the flag as we are no longer sending an update.
        self._sending_sync = False
        if self._need_to_send_sync:
            # If something happened while we were sending the update then
            # we might need to send another update.
            # TODO: Check if the update that was sent matches the current state
            # as we only need to send an update if they are different.
            self._need_to_send_sync = False
            yield self._send_syncing_users_now()

    @defer.inlineCallbacks
    def notify_from_replication(self, states, stream_id):
        parties = yield self._get_interested_parties(
            states, calculate_remote_hosts=False
        )
        room_ids_to_states, users_to_states, _ = parties

        self.notifier.on_new_event(
            "presence_key", stream_id, rooms=room_ids_to_states.keys(),
            users=users_to_states.keys()
        )

    @defer.inlineCallbacks
    def process_replication(self, result):
        stream = result.get("presence", {"rows": []})
        states = []
        for row in stream["rows"]:
            (
                position, user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            ) = row
            state = UserPresenceState(
                user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            )
            self.user_to_current_state[user_id] = state
            states.append(state)

        if states and "position" in stream:
            stream_id = int(stream["position"])
            yield self.notify_from_replication(states, stream_id)


class SynchrotronTyping(object):
    def __init__(self, hs):
        self._latest_room_serial = 0
        self._room_serials = {}
        self._room_typing = {}

    def stream_positions(self):
        # We must update this typing token from the response of the previous
        # sync. In particular, the stream id may "reset" back to zero/a low
        # value which we *must* use for the next replication request.
        return {"typing": self._latest_room_serial}

    def process_replication(self, result):
        stream = result.get("typing")
        if stream:
            self._latest_room_serial = int(stream["position"])

            for row in stream["rows"]:
                position, room_id, typing_json = row
                typing = json.loads(typing_json)
                self._room_serials[room_id] = position
                self._room_typing[room_id] = typing


class SynchrotronApplicationService(object):
    def notify_interested_services(self, event):
        pass


class SynchrotronServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    sync.register_servlets(self, resource)
                    events.register_servlets(self, resource)
                    InitialSyncRestServlet(self).register(resource)
                    RoomInitialSyncRestServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse synchrotron now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        notifier = self.get_notifier()
        presence_handler = self.get_presence_handler()
        typing_handler = self.get_typing_handler()

        def notify_from_stream(
            result, stream_name, stream_key, room=None, user=None
        ):
            stream = result.get(stream_name)
            if stream:
                position_index = stream["field_names"].index("position")
                if room:
                    room_index = stream["field_names"].index(room)
                if user:
                    user_index = stream["field_names"].index(user)

                users = ()
                rooms = ()
                for row in stream["rows"]:
                    position = row[position_index]

                    if user:
                        users = (row[user_index],)

                    if room:
                        rooms = (row[room_index],)

                    notifier.on_new_event(
                        stream_key, position, users=users, rooms=rooms
                    )

        def notify(result):
            stream = result.get("events")
            if stream:
                max_position = stream["position"]
                for row in stream["rows"]:
                    position = row[0]
                    internal = json.loads(row[1])
                    event_json = json.loads(row[2])
                    event = FrozenEvent(event_json, internal_metadata_dict=internal)
                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key,)
                    notifier.on_new_room_event(
                        event, position, max_position, extra_users
                    )

            notify_from_stream(
                result, "push_rules", "push_rules_key", user="user_id"
            )
            notify_from_stream(
                result, "user_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "room_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "tag_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "receipts", "receipt_key", room="room_id"
            )
            notify_from_stream(
                result, "typing", "typing_key", room="room_id"
            )
            notify_from_stream(
                result, "to_device", "to_device_key", user="user_id"
            )

        while True:
            try:
                args = store.stream_positions()
                args.update(typing_handler.stream_positions())
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                typing_handler.process_replication(result)
                yield presence_handler.process_replication(result)
                notify(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)

    def build_presence_handler(self):
        return SynchrotronPresence(self)

    def build_typing_handler(self):
        return SynchrotronTyping(self)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse synchrotron", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.synchrotron"

    setup_logging(config.worker_log_config, config.worker_log_file)

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    ss = SynchrotronServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
        application_service_handler=SynchrotronApplicationService(),
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_datastore().start_profiling()
        ss.replicate()
        ss.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-synchrotron",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -14,71 +14,199 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import argparse
import collections
import glob
import os
import os.path
import subprocess
import signal
import subprocess
import sys
import yaml

SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]

GREEN = "\x1b[1;32m"
RED = "\x1b[1;31m"
NORMAL = "\x1b[m"


def write(message, colour=NORMAL, stream=sys.stdout):
    if colour == NORMAL:
        stream.write(message + "\n")
    else:
        stream.write(colour + message + NORMAL + "\n")


def start(configfile):
    print "Starting ...",
    write("Starting ...")
    args = SYNAPSE
    args.extend(["--daemonize", "-c", configfile])

    try:
        subprocess.check_call(args)
        print GREEN + "started" + NORMAL
        write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
    except subprocess.CalledProcessError as e:
        print (
            RED +
            "error starting (exit code: %d); see above for logs" % e.returncode +
            NORMAL
        write(
            "error starting (exit code: %d); see above for logs" % e.returncode,
            colour=RED,
        )


def stop(pidfile):
def start_worker(app, configfile, worker_configfile):
    args = [
        "python", "-B",
        "-m", app,
        "-c", configfile,
        "-c", worker_configfile
    ]

    try:
        subprocess.check_call(args)
        write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
    except subprocess.CalledProcessError as e:
        write(
            "error starting %s(%r) (exit code: %d); see above for logs" % (
                app, worker_configfile, e.returncode,
            ),
            colour=RED,
        )


def stop(pidfile, app):
    if os.path.exists(pidfile):
        pid = int(open(pidfile).read())
        os.kill(pid, signal.SIGTERM)
        print GREEN + "stopped" + NORMAL
        write("stopped %s" % (app,), colour=GREEN)


Worker = collections.namedtuple("Worker", [
    "app", "configfile", "pidfile", "cache_factor"
])


def main():
    configfile = sys.argv[2] if len(sys.argv) == 3 else "homeserver.yaml"

    if not os.path.exists(configfile):
        sys.stderr.write(
            "No config file found\n"
            "To generate a config file, run '%s -c %s --generate-config"
            " --server-name=<server name>'\n" % (
                " ".join(SYNAPSE), configfile
            )
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "action",
        choices=["start", "stop", "restart"],
        help="whether to start, stop or restart the synapse",
    )
    parser.add_argument(
        "configfile",
        nargs="?",
        default="homeserver.yaml",
        help="the homeserver config file, defaults to homeserver.yaml",
    )
    parser.add_argument(
        "-w", "--worker",
        metavar="WORKERCONFIG",
        help="start or stop a single worker",
    )
    parser.add_argument(
        "-a", "--all-processes",
        metavar="WORKERCONFIGDIR",
        help="start or stop all the workers in the given directory"
             " and the main synapse process",
    )

    options = parser.parse_args()

    if options.worker and options.all_processes:
        write(
            'Cannot use "--worker" with "--all-processes"',
            stream=sys.stderr
        )
        sys.exit(1)

    config = yaml.load(open(configfile))
    pidfile = config["pid_file"]
    configfile = options.configfile

    action = sys.argv[1] if sys.argv[1:] else "usage"
    if action == "start":
        start(configfile)
    elif action == "stop":
        stop(pidfile)
    elif action == "restart":
        stop(pidfile)
        start(configfile)
    else:
        sys.stderr.write("Usage: %s [start|stop|restart] [configfile]\n" % (sys.argv[0],))
    if not os.path.exists(configfile):
        write(
            "No config file found\n"
            "To generate a config file, run '%s -c %s --generate-config"
            " --server-name=<server name>'\n" % (
                " ".join(SYNAPSE), options.configfile
            ),
            stream=sys.stderr,
        )
        sys.exit(1)

    with open(configfile) as stream:
        config = yaml.load(stream)

    pidfile = config["pid_file"]
    cache_factor = config.get("synctl_cache_factor")
    start_stop_synapse = True

    if cache_factor:
        os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)

    worker_configfiles = []
    if options.worker:
        start_stop_synapse = False
        worker_configfile = options.worker
        if not os.path.exists(worker_configfile):
            write(
                "No worker config found at %r" % (worker_configfile,),
                stream=sys.stderr,
            )
            sys.exit(1)
        worker_configfiles.append(worker_configfile)

    if options.all_processes:
        worker_configdir = options.all_processes
        if not os.path.isdir(worker_configdir):
            write(
                "No worker config directory found at %r" % (worker_configdir,),
                stream=sys.stderr,
            )
            sys.exit(1)
        worker_configfiles.extend(sorted(glob.glob(
            os.path.join(worker_configdir, "*.yaml")
        )))

    workers = []
    for worker_configfile in worker_configfiles:
        with open(worker_configfile) as stream:
            worker_config = yaml.load(stream)
        worker_app = worker_config["worker_app"]
        worker_pidfile = worker_config["worker_pid_file"]
        worker_daemonize = worker_config["worker_daemonize"]
        assert worker_daemonize  # TODO print something more user friendly
        worker_cache_factor = worker_config.get("synctl_cache_factor")
        workers.append(Worker(
            worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
        ))

    action = options.action

    if action == "stop" or action == "restart":
        for worker in workers:
            stop(worker.pidfile, worker.app)

        if start_stop_synapse:
            stop(pidfile, "synapse.app.homeserver")

    # TODO: Wait for synapse to actually shutdown before starting it again

    if action == "start" or action == "restart":
        if start_stop_synapse:
            start(configfile)

        for worker in workers:
            if worker.cache_factor:
                os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)

            start_worker(worker.app, configfile, worker.configfile)

            if cache_factor:
                os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
            else:
                os.environ.pop("SYNAPSE_CACHE_FACTOR", None)


if __name__ == "__main__":
    main()
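# Example invocations of the reworked synctl (file names illustrative):
#   synctl start homeserver.yaml                  # just the main process
#   synctl start -w workers/pusher.yaml           # a single worker only
#   synctl restart -a workers/ homeserver.yaml    # main process plus every
#                                                 # worker config in workers/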

@@ -14,6 +14,8 @@
# limitations under the License.
from synapse.api.constants import EventTypes

from twisted.internet import defer

import logging
import re

@@ -79,7 +81,7 @@ class ApplicationService(object):
    NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

    def __init__(self, token, url=None, namespaces=None, hs_token=None,
                 sender=None, id=None):
                 sender=None, id=None, protocols=None, rate_limited=True):
        self.token = token
        self.url = url
        self.hs_token = hs_token
@@ -87,6 +89,14 @@ class ApplicationService(object):
        self.namespaces = self._check_namespaces(namespaces)
        self.id = id

        # .protocols is a publicly visible field
        if protocols:
            self.protocols = set(protocols)
        else:
            self.protocols = set()

        self.rate_limited = rate_limited

    def _check_namespaces(self, namespaces):
        # Sanity check that it is of the form:
        # {
@@ -138,65 +148,66 @@ class ApplicationService(object):
            return regex_obj["exclusive"]
        return False

    def _matches_user(self, event, member_list):
        if (hasattr(event, "sender") and
                self.is_interested_in_user(event.sender)):
            return True
    @defer.inlineCallbacks
    def _matches_user(self, event, store):
        if not event:
            defer.returnValue(False)

        if self.is_interested_in_user(event.sender):
            defer.returnValue(True)
        # also check m.room.member state key
        if (hasattr(event, "type") and event.type == EventTypes.Member
                and hasattr(event, "state_key")
                and self.is_interested_in_user(event.state_key)):
            return True
        if (event.type == EventTypes.Member and
                self.is_interested_in_user(event.state_key)):
            defer.returnValue(True)

        if not store:
            defer.returnValue(False)

        member_list = yield store.get_users_in_room(event.room_id)

        # check joined member events
        for user_id in member_list:
            if self.is_interested_in_user(user_id):
                return True
        return False
                defer.returnValue(True)
        defer.returnValue(False)

    def _matches_room_id(self, event):
        if hasattr(event, "room_id"):
            return self.is_interested_in_room(event.room_id)
        return False

    def _matches_aliases(self, event, alias_list):
    @defer.inlineCallbacks
    def _matches_aliases(self, event, store):
        if not store or not event:
            defer.returnValue(False)

        alias_list = yield store.get_aliases_for_room(event.room_id)
        for alias in alias_list:
            if self.is_interested_in_alias(alias):
                return True
        return False
                defer.returnValue(True)
        defer.returnValue(False)

    def is_interested(self, event, restrict_to=None, aliases_for_event=None,
                      member_list=None):
    @defer.inlineCallbacks
    def is_interested(self, event, store=None):
        """Check if this service is interested in this event.

        Args:
            event(Event): The event to check.
            restrict_to(str): The namespace to restrict regex tests to.
            aliases_for_event(list): A list of all the known room aliases for
                this event.
            member_list(list): A list of all joined user_ids in this room.
            store(DataStore)
        Returns:
            bool: True if this service would like to know about this event.
        """
        if aliases_for_event is None:
            aliases_for_event = []
        if member_list is None:
            member_list = []
        # Do cheap checks first
        if self._matches_room_id(event):
            defer.returnValue(True)

        if restrict_to and restrict_to not in ApplicationService.NS_LIST:
            # this is a programming error, so fail early and raise a general
            # exception
            raise Exception("Unexpected restrict_to value: %s" % restrict_to)
        if (yield self._matches_aliases(event, store)):
            defer.returnValue(True)

        if not restrict_to:
            return (self._matches_user(event, member_list)
                    or self._matches_aliases(event, aliases_for_event)
                    or self._matches_room_id(event))
        elif restrict_to == ApplicationService.NS_ALIASES:
            return self._matches_aliases(event, aliases_for_event)
        elif restrict_to == ApplicationService.NS_ROOMS:
            return self._matches_room_id(event)
        elif restrict_to == ApplicationService.NS_USERS:
            return self._matches_user(event, member_list)
        if (yield self._matches_user(event, store)):
            defer.returnValue(True)

        defer.returnValue(False)
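        # The rewritten checks above run cheapest-first: the room_id test
        # needs no DB access, the alias test costs one lookup, and the user
        # test may need the room's full member list -- each short-circuiting
        # as soon as anything matches.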

    def is_interested_in_user(self, user_id):
        return (
@@ -216,11 +227,17 @@ class ApplicationService(object):
            or user_id == self.sender
        )

    def is_interested_in_protocol(self, protocol):
        return protocol in self.protocols

    def is_exclusive_alias(self, alias):
        return self._is_exclusive(ApplicationService.NS_ALIASES, alias)

    def is_exclusive_room(self, room_id):
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def is_rate_limited(self):
        return self.rate_limited

    def __str__(self):
        return "ApplicationService: %s" % (self.__dict__,)

@@ -14,9 +14,11 @@
# limitations under the License.
from twisted.internet import defer

from synapse.api.constants import ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.events.utils import serialize_event
from synapse.util.caches.response_cache import ResponseCache

import logging
import urllib
@@ -24,6 +26,42 @@ import urllib
logger = logging.getLogger(__name__)


HOUR_IN_MS = 60 * 60 * 1000


APP_SERVICE_PREFIX = "/_matrix/app/unstable"


def _is_valid_3pe_metadata(info):
    if "instances" not in info:
        return False
    if not isinstance(info["instances"], list):
        return False
    return True


def _is_valid_3pe_result(r, field):
    if not isinstance(r, dict):
        return False

    for k in (field, "protocol"):
        if k not in r:
            return False
        if not isinstance(r[k], str):
            return False

    if "fields" not in r:
        return False
    fields = r["fields"]
    if not isinstance(fields, dict):
        return False
    for k in fields.keys():
        if not isinstance(fields[k], str):
            return False

    return True
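# An illustrative result that passes _is_valid_3pe_result for a USER query
# (where field == "userid"); the values are made up:
#   {"userid": "@gateway_user:example.org", "protocol": "irc",
#    "fields": {"network": "example", "nickname": "someone"}}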


class ApplicationServiceApi(SimpleHttpClient):
    """This class manages HS -> AS communications, including querying and
    pushing.
@@ -33,8 +71,12 @@ class ApplicationServiceApi(SimpleHttpClient):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)

    @defer.inlineCallbacks
    def query_user(self, service, user_id):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/users/%s" % urllib.quote(user_id))
        response = None
        try:
@@ -54,6 +96,8 @@ class ApplicationServiceApi(SimpleHttpClient):

    @defer.inlineCallbacks
    def query_alias(self, service, alias):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
        response = None
        try:
@@ -71,8 +115,84 @@ class ApplicationServiceApi(SimpleHttpClient):
            logger.warning("query_alias to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def query_3pe(self, service, kind, protocol, fields):
        if kind == ThirdPartyEntityKind.USER:
            required_field = "userid"
        elif kind == ThirdPartyEntityKind.LOCATION:
            required_field = "alias"
        else:
            raise ValueError(
                "Unrecognised 'kind' argument %r to query_3pe()" % (kind,)
            )
        if service.url is None:
            defer.returnValue([])

        uri = "%s%s/thirdparty/%s/%s" % (
            service.url,
            APP_SERVICE_PREFIX,
            kind,
            urllib.quote(protocol)
        )
        try:
            response = yield self.get_json(uri, fields)
            if not isinstance(response, list):
                logger.warning(
                    "query_3pe to %s returned an invalid response %r",
                    uri, response
                )
                defer.returnValue([])

            ret = []
            for r in response:
                if _is_valid_3pe_result(r, field=required_field):
                    ret.append(r)
                else:
                    logger.warning(
                        "query_3pe to %s returned an invalid result %r",
                        uri, r
                    )

            defer.returnValue(ret)
        except Exception as ex:
            logger.warning("query_3pe to %s threw exception %s", uri, ex)
            defer.returnValue([])

    def get_3pe_protocol(self, service, protocol):
        if service.url is None:
            defer.returnValue({})

        @defer.inlineCallbacks
        def _get():
            uri = "%s%s/thirdparty/protocol/%s" % (
                service.url,
                APP_SERVICE_PREFIX,
                urllib.quote(protocol)
            )
            try:
                info = yield self.get_json(uri, {})

                if not _is_valid_3pe_metadata(info):
                    logger.warning("query_3pe_protocol to %s did not return a"
                                   " valid result", uri)
                    defer.returnValue(None)

                defer.returnValue(info)
            except Exception as ex:
                logger.warning("query_3pe_protocol to %s threw exception %s",
                               uri, ex)
                defer.returnValue(None)

        key = (service.id, protocol)
        return self.protocol_meta_cache.get(key) or (
            self.protocol_meta_cache.set(key, _get())
        )
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def push_bulk(self, service, events, txn_id=None):
|
||||
if service.url is None:
|
||||
defer.returnValue(True)
|
||||
|
||||
events = self._serialize(events)
|
||||
|
||||
if txn_id is None:
|
||||
@@ -100,11 +220,6 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
logger.warning("push_bulk to %s threw exception %s", uri, ex)
|
||||
defer.returnValue(False)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def push(self, service, event, txn_id=None):
|
||||
response = yield self.push_bulk(service, [event], txn_id)
|
||||
defer.returnValue(response)
|
||||
|
||||
def _serialize(self, events):
|
||||
time_now = self.clock.time_msec()
|
||||
return [
|
||||
|
||||
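The get-or-set idiom around the ResponseCache in get_3pe_protocol deduplicates concurrent lookups: the first caller starts _get() and stores the in-flight result under the key, and later callers reuse it until the hour-long timeout expires. A minimal synchronous sketch of the same pattern (InFlightCache is a hypothetical stand-in for synapse's ResponseCache, with no Twisted machinery):

# Sketch of the get-or-set caching idiom used by get_3pe_protocol above.
class InFlightCache(object):
    def __init__(self):
        self._entries = {}  # key -> result (or in-flight computation)

    def get(self, key):
        return self._entries.get(key)

    def set(self, key, value):
        # Store and return the value so callers can chain on it, mirroring
        # `cache.get(key) or cache.set(key, _get())`.
        self._entries[key] = value
        return value

cache = InFlightCache()

def fetch_protocol_meta(key):
    return cache.get(key) or cache.set(key, {"instances": []})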
@@ -48,32 +48,35 @@ UP & quit +---------- YES SUCCESS
This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
from twisted.internet import defer

from synapse.appservice import ApplicationServiceState
from twisted.internet import defer
from synapse.util.logcontext import preserve_fn
from synapse.util.metrics import Measure

import logging

logger = logging.getLogger(__name__)


class AppServiceScheduler(object):
class ApplicationServiceScheduler(object):
    """ Public facing API for this module. Does the required DI to tie the
    components together. This also serves as the "event_pool", which in this
    case is a simple array.
    """

    def __init__(self, clock, store, as_api):
        self.clock = clock
        self.store = store
        self.as_api = as_api
    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.as_api = hs.get_application_service_api()

        def create_recoverer(service, callback):
            return _Recoverer(clock, store, as_api, service, callback)
            return _Recoverer(self.clock, self.store, self.as_api, service, callback)

        self.txn_ctrl = _TransactionController(
            clock, store, as_api, create_recoverer
            self.clock, self.store, self.as_api, create_recoverer
        )
        self.queuer = _ServiceQueuer(self.txn_ctrl)
        self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)

    @defer.inlineCallbacks
    def start(self):
@@ -94,38 +97,36 @@ class _ServiceQueuer(object):
    this schedules any other events in the queue to run.
    """

    def __init__(self, txn_ctrl):
    def __init__(self, txn_ctrl, clock):
        self.queued_events = {}  # dict of {service_id: [events]}
        self.pending_requests = {}  # dict of {service_id: Deferred}
        self.requests_in_flight = set()
        self.txn_ctrl = txn_ctrl
        self.clock = clock

    def enqueue(self, service, event):
        # if this service isn't being sent something
        if not self.pending_requests.get(service.id):
            self._send_request(service, [event])
        else:
            # add to queue for this service
            if service.id not in self.queued_events:
                self.queued_events[service.id] = []
            self.queued_events[service.id].append(event)
        self.queued_events.setdefault(service.id, []).append(event)
        preserve_fn(self._send_request)(service)

    def _send_request(self, service, events):
        # send request and add callbacks
        d = self.txn_ctrl.send(service, events)
        d.addBoth(self._on_request_finish)
        d.addErrback(self._on_request_fail)
        self.pending_requests[service.id] = d
    @defer.inlineCallbacks
    def _send_request(self, service):
        if service.id in self.requests_in_flight:
            return

    def _on_request_finish(self, service):
        self.pending_requests[service.id] = None
        # if there are queued events, then send them.
        if (service.id in self.queued_events
                and len(self.queued_events[service.id]) > 0):
            self._send_request(service, self.queued_events[service.id])
            self.queued_events[service.id] = []
        self.requests_in_flight.add(service.id)
        try:
            while True:
                events = self.queued_events.pop(service.id, [])
                if not events:
                    return

    def _on_request_fail(self, err):
        logger.error("AS request failed: %s", err)
                with Measure(self.clock, "servicequeuer.send"):
                    try:
                        yield self.txn_ctrl.send(service, events)
                    except:
                        logger.exception("AS request failed")
        finally:
            self.requests_in_flight.discard(service.id)


class _TransactionController(object):
@@ -149,14 +150,12 @@ class _TransactionController(object):
            if service_is_up:
                sent = yield txn.send(self.as_api)
                if sent:
                    txn.complete(self.store)
                    yield txn.complete(self.store)
                else:
                    self._start_recoverer(service)
                    preserve_fn(self._start_recoverer)(service)
        except Exception as e:
            logger.exception(e)
            self._start_recoverer(service)
        # request has finished
        defer.returnValue(service)
            preserve_fn(self._start_recoverer)(service)

    @defer.inlineCallbacks
    def on_recovered(self, recoverer):
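The rewritten _send_request drains the per-service queue in a loop while holding an in-flight marker, so at most one transaction per application service is outstanding and events enqueued mid-send are picked up on the next loop iteration. A small synchronous sketch of that drain loop (hypothetical names, no Twisted):

# Synchronous sketch of the drain loop in _ServiceQueuer._send_request.
queued = {"as1": ["event1", "event2"]}
in_flight = set()

def send(service_id, send_txn):
    if service_id in in_flight:
        return  # an earlier call is already draining this service's queue
    in_flight.add(service_id)
    try:
        while True:
            events = queued.pop(service_id, [])
            if not events:
                return
            # Events enqueued while send_txn runs are picked up next round.
            send_txn(service_id, events)
    finally:
        in_flight.discard(service_id)

send("as1", lambda sid, evs: None)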
@@ -28,7 +28,7 @@ if __name__ == "__main__":
            sys.stderr.write("\n" + e.message + "\n")
            sys.exit(1)

        print getattr(config, key)
        print (getattr(config, key))
        sys.exit(0)
    else:
        sys.stderr.write("Unknown command %r\n" % (action,))

@@ -64,11 +64,12 @@ class Config(object):
        if isinstance(value, int) or isinstance(value, long):
            return value
        second = 1000
        hour = 60 * 60 * second
        minute = 60 * second
        hour = 60 * minute
        day = 24 * hour
        week = 7 * day
        year = 365 * day
        sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
        sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
        size = 1
        suffix = value[-1]
        if suffix in sizes:
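With the new "m" suffix, the duration parser maps a suffixed string to milliseconds (the `second = 1000` base implies millisecond units). A hypothetical standalone re-implementation, not synapse's own method, showing the expected results:

# Illustrative sketch of the duration parsing logic above.
SIZES = {"s": 1000, "m": 60 * 1000, "h": 60 * 60 * 1000,
         "d": 24 * 60 * 60 * 1000, "w": 7 * 24 * 60 * 60 * 1000,
         "y": 365 * 24 * 60 * 60 * 1000}

def parse_duration(value):
    if isinstance(value, int):
        return value  # already in milliseconds
    size = SIZES.get(value[-1], 1)
    digits = value[:-1] if value[-1] in SIZES else value
    return int(digits) * size

assert parse_duration("5m") == 300000       # five minutes
assert parse_duration("2w") == 1209600000   # two weeks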
@@ -104,7 +105,7 @@ class Config(object):
        dir_path = cls.abspath(dir_path)
        try:
            os.makedirs(dir_path)
        except OSError, e:
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        if not os.path.isdir(dir_path):
@@ -157,9 +158,40 @@ class Config(object):
        return default_config, config

    @classmethod
    def load_config(cls, description, argv, generate_section=None):
        obj = cls()
    def load_config(cls, description, argv):
        config_parser = argparse.ArgumentParser(
            description=description,
        )
        config_parser.add_argument(
            "-c", "--config-path",
            action="append",
            metavar="CONFIG_FILE",
            help="Specify config file. Can be given multiple times and"
                 " may specify directories containing *.yaml files."
        )

        config_parser.add_argument(
            "--keys-directory",
            metavar="DIRECTORY",
            help="Where files such as certs and signing keys are stored when"
                 " their location is given explicitly in the config."
                 " Defaults to the directory containing the last config file",
        )

        config_args = config_parser.parse_args(argv)

        config_files = find_config_files(search_paths=config_args.config_path)

        obj = cls()
        obj.read_config_files(
            config_files,
            keys_directory=config_args.keys_directory,
            generate_keys=False,
        )
        return obj

    @classmethod
    def load_or_generate_config(cls, description, argv):
        config_parser = argparse.ArgumentParser(add_help=False)
        config_parser.add_argument(
            "-c", "--config-path",
@@ -176,7 +208,7 @@ class Config(object):
        config_parser.add_argument(
            "--report-stats",
            action="store",
            help="Stuff",
            help="Whether the generated config reports anonymized usage statistics",
            choices=["yes", "no"]
        )
        config_parser.add_argument(
@@ -197,36 +229,11 @@ class Config(object):
        )
        config_args, remaining_args = config_parser.parse_known_args(argv)

        config_files = find_config_files(search_paths=config_args.config_path)

        generate_keys = config_args.generate_keys

        config_files = []
        if config_args.config_path:
            for config_path in config_args.config_path:
                if os.path.isdir(config_path):
                    # We accept specifying directories as config paths, we search
                    # inside that directory for all files matching *.yaml, and then
                    # we apply them in *sorted* order.
                    files = []
                    for entry in os.listdir(config_path):
                        entry_path = os.path.join(config_path, entry)
                        if not os.path.isfile(entry_path):
                            print (
                                "Found subdirectory in config directory: %r. IGNORING."
                            ) % (entry_path, )
                            continue

                        if not entry.endswith(".yaml"):
                            print (
                                "Found file in config directory that does not"
                                " end in '.yaml': %r. IGNORING."
                            ) % (entry_path, )
                            continue

                        files.append(entry_path)

                    config_files.extend(sorted(files))
                else:
                    config_files.append(config_path)
        obj = cls()

        if config_args.generate_config:
            if config_args.report_stats is None:
@@ -299,28 +306,43 @@ class Config(object):
                    " -c CONFIG-FILE\""
                )

        if config_args.keys_directory:
            config_dir_path = config_args.keys_directory
        else:
            config_dir_path = os.path.dirname(config_args.config_path[-1])
        config_dir_path = os.path.abspath(config_dir_path)
        obj.read_config_files(
            config_files,
            keys_directory=config_args.keys_directory,
            generate_keys=generate_keys,
        )

        if generate_keys:
            return None

        obj.invoke_all("read_arguments", args)

        return obj

    def read_config_files(self, config_files, keys_directory=None,
                          generate_keys=False):
        if not keys_directory:
            keys_directory = os.path.dirname(config_files[-1])

        config_dir_path = os.path.abspath(keys_directory)

        specified_config = {}
        for config_file in config_files:
            yaml_config = cls.read_config_file(config_file)
            yaml_config = self.read_config_file(config_file)
            specified_config.update(yaml_config)

        if "server_name" not in specified_config:
            raise ConfigError(MISSING_SERVER_NAME)

        server_name = specified_config["server_name"]
        _, config = obj.generate_config(
        _, config = self.generate_config(
            config_dir_path=config_dir_path,
            server_name=server_name,
            is_generating_file=False,
        )
        config.pop("log_config")
        config.update(specified_config)

        if "report_stats" not in config:
            raise ConfigError(
                MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
@@ -328,11 +350,51 @@ class Config(object):
            )

        if generate_keys:
            obj.invoke_all("generate_files", config)
            self.invoke_all("generate_files", config)
            return

        obj.invoke_all("read_config", config)
        self.invoke_all("read_config", config)

        obj.invoke_all("read_arguments", args)

        return obj
def find_config_files(search_paths):
    """Finds config files using a list of search paths. If a path is a file
    then that file path is added to the list. If a search path is a directory
    then all the "*.yaml" files in that directory are added to the list in
    sorted order.

    Args:
        search_paths(list(str)): A list of paths to search.

    Returns:
        list(str): A list of file paths.
    """

    config_files = []
    if search_paths:
        for config_path in search_paths:
            if os.path.isdir(config_path):
                # We accept specifying directories as config paths, we search
                # inside that directory for all files matching *.yaml, and then
                # we apply them in *sorted* order.
                files = []
                for entry in os.listdir(config_path):
                    entry_path = os.path.join(config_path, entry)
                    if not os.path.isfile(entry_path):
                        print (
                            "Found subdirectory in config directory: %r. IGNORING."
                        ) % (entry_path, )
                        continue

                    if not entry.endswith(".yaml"):
                        print (
                            "Found file in config directory that does not"
                            " end in '.yaml': %r. IGNORING."
                        ) % (entry_path, )
                        continue

                    files.append(entry_path)

                config_files.extend(sorted(files))
            else:
                config_files.append(config_path)
    return config_files
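A quick way to see the sorted *.yaml behaviour of find_config_files, assuming the function above is in scope (the file names here are throwaway examples):

import os, tempfile

# Build a disposable config directory with two YAML files and a stray file.
d = tempfile.mkdtemp()
for name in ("10-extra.yaml", "00-base.yaml", "notes.txt"):
    open(os.path.join(d, name), "w").close()

found = find_config_files(search_paths=[d])
# Only the .yaml files survive, in sorted order, so 00-base.yaml comes first;
# read_config_files then applies them in order, so later files win on key clashes.
print(found)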
40  synapse/config/api.py  Normal file
@@ -0,0 +1,40 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

from synapse.api.constants import EventTypes


class ApiConfig(Config):

    def read_config(self, config):
        self.room_invite_state_types = config.get("room_invite_state_types", [
            EventTypes.JoinRules,
            EventTypes.CanonicalAlias,
            EventTypes.RoomAvatar,
            EventTypes.Name,
        ])

    def default_config(cls, **kwargs):
        return """\
        ## API Configuration ##

        # A list of event types that will be included in the room_invite_state
        room_invite_state_types:
            - "{JoinRules}"
            - "{CanonicalAlias}"
            - "{RoomAvatar}"
            - "{Name}"
        """.format(**vars(EventTypes))
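After `.format(**vars(EventTypes))`, the placeholders expand to the Matrix event type constants, so the generated section of the config reads:

## API Configuration ##

# A list of event types that will be included in the room_invite_state
room_invite_state_types:
    - "m.room.join_rules"
    - "m.room.canonical_alias"
    - "m.room.avatar"
    - "m.room.name"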
@@ -12,16 +12,153 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from ._base import Config, ConfigError

from synapse.appservice import ApplicationService
from synapse.types import UserID

import urllib
import yaml
import logging

logger = logging.getLogger(__name__)


class AppServiceConfig(Config):

    def read_config(self, config):
        self.app_service_config_files = config.get("app_service_config_files", [])
        self.notify_appservices = config.get("notify_appservices", True)

    def default_config(cls, **kwargs):
        return """\
        # A list of application service config files to use
        app_service_config_files: []
        """


def load_appservices(hostname, config_files):
    """Returns a list of Application Services from the config files."""
    if not isinstance(config_files, list):
        logger.warning(
            "Expected %s to be a list of AS config files.", config_files
        )
        return []

    # Dicts of value -> filename
    seen_as_tokens = {}
    seen_ids = {}

    appservices = []

    for config_file in config_files:
        try:
            with open(config_file, 'r') as f:
                appservice = _load_appservice(
                    hostname, yaml.load(f), config_file
                )
                if appservice.id in seen_ids:
                    raise ConfigError(
                        "Cannot reuse ID across application services: "
                        "%s (files: %s, %s)" % (
                            appservice.id, config_file, seen_ids[appservice.id],
                        )
                    )
                seen_ids[appservice.id] = config_file
                if appservice.token in seen_as_tokens:
                    raise ConfigError(
                        "Cannot reuse as_token across application services: "
                        "%s (files: %s, %s)" % (
                            appservice.token,
                            config_file,
                            seen_as_tokens[appservice.token],
                        )
                    )
                seen_as_tokens[appservice.token] = config_file
                logger.info("Loaded application service: %s", appservice)
                appservices.append(appservice)
        except Exception as e:
            logger.error("Failed to load appservice from '%s'", config_file)
            logger.exception(e)
            raise
    return appservices


def _load_appservice(hostname, as_info, config_filename):
    required_string_fields = [
        "id", "as_token", "hs_token", "sender_localpart"
    ]
    for field in required_string_fields:
        if not isinstance(as_info.get(field), basestring):
            raise KeyError("Required string field: '%s' (%s)" % (
                field, config_filename,
            ))

    # 'url' must either be a string or explicitly null, not missing
    # to avoid accidentally turning off push for ASes.
    if (not isinstance(as_info.get("url"), basestring) and
            as_info.get("url", "") is not None):
        raise KeyError(
            "Required string field or explicit null: 'url' (%s)" % (config_filename,)
        )

    localpart = as_info["sender_localpart"]
    if urllib.quote(localpart) != localpart:
        raise ValueError(
            "sender_localpart needs characters which are not URL encoded."
        )
    user = UserID(localpart, hostname)
    user_id = user.to_string()

    # Rate limiting for users of this AS is on by default (excludes sender)
    rate_limited = True
    if isinstance(as_info.get("rate_limited"), bool):
        rate_limited = as_info.get("rate_limited")

    # namespace checks
    if not isinstance(as_info.get("namespaces"), dict):
        raise KeyError("Requires 'namespaces' object.")
    for ns in ApplicationService.NS_LIST:
        # specific namespaces are optional
        if ns in as_info["namespaces"]:
            # expect a list of dicts with exclusive and regex keys
            for regex_obj in as_info["namespaces"][ns]:
                if not isinstance(regex_obj, dict):
                    raise ValueError(
                        "Expected namespace entry in %s to be an object,"
                        " but got %s", ns, regex_obj
                    )
                if not isinstance(regex_obj.get("regex"), basestring):
                    raise ValueError(
                        "Missing/bad type 'regex' key in %s", regex_obj
                    )
                if not isinstance(regex_obj.get("exclusive"), bool):
                    raise ValueError(
                        "Missing/bad type 'exclusive' key in %s", regex_obj
                    )
    # protocols check
    protocols = as_info.get("protocols")
    if protocols:
        # Because strings are lists in python
        if isinstance(protocols, str) or not isinstance(protocols, list):
            raise KeyError("Optional 'protocols' must be a list if present.")
        for p in protocols:
            if not isinstance(p, str):
                raise KeyError("Bad value for 'protocols' item")

    if as_info["url"] is None:
        logger.info(
            "(%s) Explicitly empty 'url' provided. This application service"
            " will not receive events or queries.",
            config_filename,
        )
    return ApplicationService(
        token=as_info["as_token"],
        url=as_info["url"],
        namespaces=as_info["namespaces"],
        hs_token=as_info["hs_token"],
        sender=user_id,
        id=as_info["id"],
        protocols=protocols,
        rate_limited=rate_limited
    )
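A minimal registration file that _load_appservice would accept might look like this (all values are hypothetical; the structure follows the validation above: four required string fields, a url that is a string or explicit null, and namespaces as lists of regex/exclusive objects):

id: "irc-bridge"
url: "http://localhost:9999"       # or explicitly null to disable push
as_token: "<random string>"
hs_token: "<random string>"
sender_localpart: "ircbot"         # must survive urllib.quote unchanged
rate_limited: false
namespaces:
  users:
    - regex: "@irc_.*:example\\.com"
      exclusive: true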
@@ -27,6 +27,7 @@ class CaptchaConfig(Config):
|
||||
def default_config(self, **kwargs):
|
||||
return """\
|
||||
## Captcha ##
|
||||
# See docs/CAPTCHA_SETUP for full details of configuring this.
|
||||
|
||||
# This Home Server's ReCAPTCHA public key.
|
||||
recaptcha_public_key: "YOUR_PUBLIC_KEY"
|
||||
|
||||
98
synapse/config/emailconfig.py
Normal file
98
synapse/config/emailconfig.py
Normal file
@@ -0,0 +1,98 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This file can't be called email.py because if it is, we cannot:
|
||||
import email.utils
|
||||
|
||||
from ._base import Config
|
||||
|
||||
|
||||
class EmailConfig(Config):
|
||||
def read_config(self, config):
|
||||
self.email_enable_notifs = False
|
||||
|
||||
email_config = config.get("email", {})
|
||||
self.email_enable_notifs = email_config.get("enable_notifs", False)
|
||||
|
||||
if self.email_enable_notifs:
|
||||
# make sure we can import the required deps
|
||||
import jinja2
|
||||
import bleach
|
||||
# prevent unused warnings
|
||||
jinja2
|
||||
bleach
|
||||
|
||||
required = [
|
||||
"smtp_host",
|
||||
"smtp_port",
|
||||
"notif_from",
|
||||
"template_dir",
|
||||
"notif_template_html",
|
||||
"notif_template_text",
|
||||
]
|
||||
|
||||
missing = []
|
||||
for k in required:
|
||||
if k not in email_config:
|
||||
missing.append(k)
|
||||
|
||||
if (len(missing) > 0):
|
||||
raise RuntimeError(
|
||||
"email.enable_notifs is True but required keys are missing: %s" %
|
||||
(", ".join(["email." + k for k in missing]),)
|
||||
)
|
||||
|
||||
if config.get("public_baseurl") is None:
|
||||
raise RuntimeError(
|
||||
"email.enable_notifs is True but no public_baseurl is set"
|
||||
)
|
||||
|
||||
self.email_smtp_host = email_config["smtp_host"]
|
||||
self.email_smtp_port = email_config["smtp_port"]
|
||||
self.email_notif_from = email_config["notif_from"]
|
||||
self.email_template_dir = email_config["template_dir"]
|
||||
self.email_notif_template_html = email_config["notif_template_html"]
|
||||
self.email_notif_template_text = email_config["notif_template_text"]
|
||||
self.email_notif_for_new_users = email_config.get(
|
||||
"notif_for_new_users", True
|
||||
)
|
||||
if "app_name" in email_config:
|
||||
self.email_app_name = email_config["app_name"]
|
||||
else:
|
||||
self.email_app_name = "Matrix"
|
||||
|
||||
# make sure it's valid
|
||||
parsed = email.utils.parseaddr(self.email_notif_from)
|
||||
if parsed[1] == '':
|
||||
raise RuntimeError("Invalid notif_from address")
|
||||
else:
|
||||
self.email_enable_notifs = False
|
||||
# Not much point setting defaults for the rest: it would be an
|
||||
# error for them to be used.
|
||||
|
||||
def default_config(self, config_dir_path, server_name, **kwargs):
|
||||
return """
|
||||
# Enable sending emails for notification events
|
||||
#email:
|
||||
# enable_notifs: false
|
||||
# smtp_host: "localhost"
|
||||
# smtp_port: 25
|
||||
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
|
||||
# app_name: Matrix
|
||||
# template_dir: res/templates
|
||||
# notif_template_html: notif_mail.html
|
||||
# notif_template_text: notif_mail.txt
|
||||
# notif_for_new_users: True
|
||||
"""
|
||||
@@ -23,18 +23,24 @@ from .captcha import CaptchaConfig
from .voip import VoipConfig
from .registration import RegistrationConfig
from .metrics import MetricsConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .key import KeyConfig
from .saml2 import SAML2Config
from .cas import CasConfig
from .password import PasswordConfig
from .jwt import JWTConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .emailconfig import EmailConfig
from .workers import WorkerConfig


class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                       RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                       VoipConfig, RegistrationConfig, MetricsConfig,
                       VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                       PasswordConfig,):
                       JWTConfig, PasswordConfig, EmailConfig,
                       WorkerConfig, PasswordAuthProviderConfig,):
    pass
54  synapse/config/jwt.py  Normal file
@@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Niklas Riekenbrauck
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError


MISSING_JWT = (
    """Missing jwt library. This is required for jwt login.

    Install by running:
        pip install pyjwt
    """
)


class JWTConfig(Config):
    def read_config(self, config):
        jwt_config = config.get("jwt_config", None)
        if jwt_config:
            self.jwt_enabled = jwt_config.get("enabled", False)
            self.jwt_secret = jwt_config["secret"]
            self.jwt_algorithm = jwt_config["algorithm"]

            try:
                import jwt
                jwt  # To stop unused lint.
            except ImportError:
                raise ConfigError(MISSING_JWT)
        else:
            self.jwt_enabled = False
            self.jwt_secret = None
            self.jwt_algorithm = None

    def default_config(self, **kwargs):
        return """\
        # The JWT needs to contain a globally unique "sub" (subject) claim.
        #
        # jwt_config:
        #    enabled: true
        #    secret: "a secret"
        #    algorithm: "HS256"
        """
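Minting a token that a matching secret/algorithm pair would accept takes a couple of lines with pyjwt (the secret and subject here are placeholders, not values from the document):

import jwt  # pyjwt

# Sign a token carrying the required unique "sub" claim.
token = jwt.encode({"sub": "frank"}, "a secret", algorithm="HS256")

# Verifying with the same secret and algorithm recovers the claims.
claims = jwt.decode(token, "a secret", algorithms=["HS256"])
assert claims["sub"] == "frank"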
@@ -57,6 +57,8 @@ class KeyConfig(Config):
|
||||
seed = self.signing_key[0].seed
|
||||
self.macaroon_secret_key = hashlib.sha256(seed)
|
||||
|
||||
self.expire_access_token = config.get("expire_access_token", False)
|
||||
|
||||
def default_config(self, config_dir_path, server_name, is_generating_file=False,
|
||||
**kwargs):
|
||||
base_key_name = os.path.join(config_dir_path, server_name)
|
||||
@@ -69,6 +71,9 @@ class KeyConfig(Config):
|
||||
return """\
|
||||
macaroon_secret_key: "%(macaroon_secret_key)s"
|
||||
|
||||
# Used to enable access token expiration.
|
||||
expire_access_token: False
|
||||
|
||||
## Signing Keys ##
|
||||
|
||||
# Path to the signing key to sign messages with
|
||||
|
||||
@@ -126,54 +126,58 @@ class LoggingConfig(Config):
        )

    def setup_logging(self):
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s"
        )
        if self.log_config is None:
        setup_logging(self.log_config, self.log_file, self.verbosity)

            level = logging.INFO
            level_for_storage = logging.INFO
            if self.verbosity:
                level = logging.DEBUG
                if self.verbosity > 1:
                    level_for_storage = logging.DEBUG

            # FIXME: we need a logging.WARN for a -q quiet option
            logger = logging.getLogger('')
            logger.setLevel(level)
def setup_logging(log_config=None, log_file=None, verbosity=None):
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )
    if log_config is None:

            logging.getLogger('synapse.storage').setLevel(level_for_storage)
        level = logging.INFO
        level_for_storage = logging.INFO
        if verbosity:
            level = logging.DEBUG
            if verbosity > 1:
                level_for_storage = logging.DEBUG

            formatter = logging.Formatter(log_format)
            if self.log_file:
                # TODO: Customisable file size / backup count
                handler = logging.handlers.RotatingFileHandler(
                    self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
                )
        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)

                def sighup(signum, stack):
                    logger.info("Closing log file due to SIGHUP")
                    handler.doRollover()
                    logger.info("Opened new log file due to SIGHUP")
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

                # TODO(paul): obviously this is a terrible mechanism for
                # stealing SIGHUP, because it means no other part of synapse
                # can use it instead. If we want to catch SIGHUP anywhere
                # else as well, I'd suggest we find a nicer way to broadcast
                # it around.
                if getattr(signal, "SIGHUP"):
                    signal.signal(signal.SIGHUP, sighup)
            else:
                handler = logging.StreamHandler()
            handler.setFormatter(formatter)
        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
            )

            handler.addFilter(LoggingContextFilter(request=""))
            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            logger.addHandler(handler)
            # TODO(paul): obviously this is a terrible mechanism for
            # stealing SIGHUP, because it means no other part of synapse
            # can use it instead. If we want to catch SIGHUP anywhere
            # else as well, I'd suggest we find a nicer way to broadcast
            # it around.
            if getattr(signal, "SIGHUP"):
                signal.signal(signal.SIGHUP, sighup)
        else:
            with open(self.log_config, 'r') as f:
                logging.config.dictConfig(yaml.load(f))
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        observer = PythonLoggingObserver()
        observer.start()
        handler.addFilter(LoggingContextFilter(request=""))

        logger.addHandler(handler)
    else:
        with open(log_config, 'r') as f:
            logging.config.dictConfig(yaml.load(f))

    observer = PythonLoggingObserver()
    observer.start()
@@ -23,10 +23,14 @@ class PasswordConfig(Config):
    def read_config(self, config):
        password_config = config.get("password_config", {})
        self.password_enabled = password_config.get("enabled", True)
        self.password_pepper = password_config.get("pepper", "")

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable password for login.
        password_config:
           enabled: true
           # Uncomment and change to a secret random string for extra security.
           # DO NOT CHANGE THIS AFTER INITIAL SETUP!
           #pepper: ""
        """
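The pepper is combined with the password before hashing, which is why it must never change after setup: a new pepper invalidates every stored hash. A sketch of the scheme (not synapse's exact code; it assumes bcrypt as the hash, matching the bcrypt_rounds option elsewhere in the config):

import bcrypt

pepper = "site-wide secret"  # hypothetical value from password_config
rounds = 12

def hash_password(password):
    # Append the pepper, then bcrypt with a per-password salt.
    return bcrypt.hashpw((password + pepper).encode("utf8"),
                         bcrypt.gensalt(rounds))

def check_password(password, stored):
    return bcrypt.checkpw((password + pepper).encode("utf8"), stored)

stored = hash_password("hunter2")
assert check_password("hunter2", stored)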
66  synapse/config/password_auth_providers.py  Normal file
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Openmarket
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError

import importlib


class PasswordAuthProviderConfig(Config):
    def read_config(self, config):
        self.password_providers = []

        # We want to be backwards compatible with the old `ldap_config`
        # param.
        ldap_config = config.get("ldap_config", {})
        self.ldap_enabled = ldap_config.get("enabled", False)
        if self.ldap_enabled:
            from synapse.util.ldap_auth_provider import LdapAuthProvider
            parsed_config = LdapAuthProvider.parse_config(ldap_config)
            self.password_providers.append((LdapAuthProvider, parsed_config))

        providers = config.get("password_providers", [])
        for provider in providers:
            # We need to import the module, and then pick the class out of
            # that, so we split based on the last dot.
            module, clz = provider['module'].rsplit(".", 1)
            module = importlib.import_module(module)
            provider_class = getattr(module, clz)

            try:
                provider_config = provider_class.parse_config(provider["config"])
            except Exception as e:
                raise ConfigError(
                    "Failed to parse config for %r: %r" % (provider['module'], e)
                )
            self.password_providers.append((provider_class, provider_config))

    def default_config(self, **kwargs):
        return """\
        # password_providers:
        #     - module: "synapse.util.ldap_auth_provider.LdapAuthProvider"
        #       config:
        #         enabled: true
        #         uri: "ldap://ldap.example.com:389"
        #         start_tls: true
        #         base: "ou=users,dc=example,dc=com"
        #         attributes:
        #            uid: "cn"
        #            mail: "email"
        #            name: "givenName"
        #         #bind_dn:
        #         #bind_password:
        #         #filter: "(objectClass=posixAccount)"
        """
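The loader only requires that the named class expose parse_config; everything else about the provider interface is up to the runtime side. A skeleton provider it could import and configure (the check_password signature and account_handler argument are assumptions for illustration, not confirmed by the config code above):

# Hypothetical minimal provider loadable via the password_providers list.
class DummyAuthProvider(object):
    def __init__(self, config, account_handler=None):
        self.config = config

    @staticmethod
    def parse_config(config):
        # Validate and normalise the provider's "config" dict here; the
        # return value is handed back to __init__ at startup.
        return config

    def check_password(self, user_id, password):
        return password == self.config.get("master_password")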
@@ -32,11 +32,16 @@ class RegistrationConfig(Config):
        )

        self.registration_shared_secret = config.get("registration_shared_secret")
        self.user_creation_max_duration = int(config["user_creation_max_duration"])

        self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
        self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
        self.allow_guest_access = config.get("allow_guest_access", False)

        self.invite_3pid_guest = (
            self.allow_guest_access and config.get("invite_3pid_guest", False)
        )

    def default_config(self, **kwargs):
        registration_shared_secret = random_string_with_symbols(50)

@@ -50,6 +55,11 @@ class RegistrationConfig(Config):
        # secret, even if registration is otherwise disabled.
        registration_shared_secret: "%(registration_shared_secret)s"

        # Sets the expiry for the short term user creation in
        # milliseconds. For instance the below duration is two weeks
        # in milliseconds.
        user_creation_max_duration: 1209600000

        # Set the number of bcrypt rounds used to generate password hash.
        # Larger numbers increase the work factor needed to generate the hash.
        # The default number of rounds is 12.
@@ -13,9 +13,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from ._base import Config, ConfigError
from collections import namedtuple


MISSING_NETADDR = (
    "Missing netaddr library. This is required for URL preview API."
)

MISSING_LXML = (
    """Missing lxml library. This is required for URL preview API.

    Install by running:
        pip install lxml

    Requires libxslt1-dev system package.
    """
)


ThumbnailRequirement = namedtuple(
    "ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@@ -23,7 +39,7 @@ ThumbnailRequirement = namedtuple(

def parse_thumbnail_requirements(thumbnail_sizes):
    """ Takes a list of dictionaries with "width", "height", and "method" keys
    and creates a map from image media types to the thumbnail size, thumnailing
    and creates a map from image media types to the thumbnail size, thumbnailing
    method, and thumbnail media type to precalculate

    Args:
@@ -53,12 +69,44 @@ class ContentRepositoryConfig(Config):
    def read_config(self, config):
        self.max_upload_size = self.parse_size(config["max_upload_size"])
        self.max_image_pixels = self.parse_size(config["max_image_pixels"])
        self.max_spider_size = self.parse_size(config["max_spider_size"])
        self.media_store_path = self.ensure_directory(config["media_store_path"])
        self.uploads_path = self.ensure_directory(config["uploads_path"])
        self.dynamic_thumbnails = config["dynamic_thumbnails"]
        self.thumbnail_requirements = parse_thumbnail_requirements(
            config["thumbnail_sizes"]
        )
        self.url_preview_enabled = config.get("url_preview_enabled", False)
        if self.url_preview_enabled:
            try:
                import lxml
                lxml  # To stop unused lint.
            except ImportError:
                raise ConfigError(MISSING_LXML)

            try:
                from netaddr import IPSet
            except ImportError:
                raise ConfigError(MISSING_NETADDR)

            if "url_preview_ip_range_blacklist" in config:
                self.url_preview_ip_range_blacklist = IPSet(
                    config["url_preview_ip_range_blacklist"]
                )
            else:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work"
                )

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ())
            )

            self.url_preview_url_blacklist = config.get(
                "url_preview_url_blacklist", ()
            )

    def default_config(self, **kwargs):
        media_store = self.default_path("media_store")
@@ -80,7 +128,7 @@ class ContentRepositoryConfig(Config):
        # the resolution requested by the client. If true then whenever
        # a new resolution is requested by the client the server will
        # generate a new thumbnail. If false the server will pick a thumbnail
        # from a precalcualted list.
        # from a precalculated list.
        dynamic_thumbnails: false

        # List of thumbnail to precalculate when an image is uploaded.
@@ -97,4 +145,76 @@ class ContentRepositoryConfig(Config):
        - width: 640
          height: 480
          method: scale
        - width: 800
          height: 600
          method: scale

        # Is the preview URL API enabled? If enabled, you *must* specify
        # an explicit url_preview_ip_range_blacklist of IPs that the spider is
        # denied from accessing.
        url_preview_enabled: False

        # List of IP address CIDR ranges that the URL preview spider is denied
        # from accessing. There are no defaults: you must explicitly
        # specify a list for URL previewing to work. You should specify any
        # internal services in your network that you do not want synapse to try
        # to connect to, otherwise anyone in any Matrix room could cause your
        # synapse to issue arbitrary GET requests to your internal services,
        # causing serious security issues.
        #
        # url_preview_ip_range_blacklist:
        # - '127.0.0.0/8'
        # - '10.0.0.0/8'
        # - '172.16.0.0/12'
        # - '192.168.0.0/16'
        # - '100.64.0.0/10'
        # - '169.254.0.0/16'
        #
        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
        # This is useful for specifying exceptions to wide-ranging blacklisted
        # target IP ranges - e.g. for enabling URL previews for a specific private
        # website only visible in your network.
        #
        # url_preview_ip_range_whitelist:
        # - '192.168.1.1'

        # Optional list of URL matches that the URL preview spider is
        # denied from accessing. You should use url_preview_ip_range_blacklist
        # in preference to this, otherwise someone could define a public DNS
        # entry that points to a private IP address and circumvent the blacklist.
        # This is more useful if you know there is an entire shape of URL that
        # you never want synapse to try to spider.
        #
        # Each list entry is a dictionary of url component attributes as returned
        # by urlparse.urlsplit as applied to the absolute form of the URL. See
        # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
        # The values of the dictionary are treated as a filename match pattern
        # applied to that component of URLs, unless they start with a ^ in which
        # case they are treated as a regular expression match. If all the
        # specified component matches for a given list item succeed, the URL is
        # blacklisted.
        #
        # url_preview_url_blacklist:
        # # blacklist any URL with a username in its URI
        # - username: '*'
        #
        # # blacklist all *.google.com URLs
        # - netloc: 'google.com'
        # - netloc: '*.google.com'
        #
        # # blacklist all plain HTTP URLs
        # - scheme: 'http'
        #
        # # blacklist http(s)://www.acme.com/foo
        # - netloc: 'www.acme.com'
        #   path: '/foo'
        #
        # # blacklist any URL with a literal IPv4 address
        # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

        # The largest allowed URL preview spidering size in bytes
        max_spider_size: "10M"
        """ % locals()
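The matching rule described in those comments (filename-style patterns per URL component, with a leading ^ switching to regex, and all specified components having to match) can be sketched as a standalone helper; this is an illustration of the documented semantics, not synapse's own resource code:

import fnmatch
import re
from urlparse import urlsplit  # Python 2, matching the docs referenced above

def url_is_blacklisted(url, blacklist):
    parts = urlsplit(url)
    for entry in blacklist:
        matched = True
        for attrib, pattern in entry.items():
            value = getattr(parts, attrib, "") or ""
            if pattern.startswith("^"):
                if not re.match(pattern, value):
                    matched = False
                    break
            elif not fnmatch.fnmatch(value, pattern):
                matched = False
                break
        if matched:
            return True  # every specified component matched this entry
    return False

assert url_is_blacklisted("http://maps.google.com/x",
                          [{"netloc": "*.google.com"}])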
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from ._base import Config, ConfigError


class ServerConfig(Config):
@@ -27,10 +27,18 @@ class ServerConfig(Config):
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", True)
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != '/':
                self.public_baseurl += '/'
        self.start_pushers = config.get("start_pushers", True)

        self.listeners = config.get("listeners", [])

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

        bind_port = config.get("bind_port")
        if bind_port:
            self.listeners = []
@@ -98,26 +106,6 @@ class ServerConfig(Config):
            ]
        })

        # Attempt to guess the content_addr for the v0 content repository
        content_addr = config.get("content_addr")
        if not content_addr:
            for listener in self.listeners:
                if listener["type"] == "http" and not listener.get("tls", False):
                    unsecure_port = listener["port"]
                    break
            else:
                raise RuntimeError("Could not determine 'content_addr'")

            host = self.server_name
            if ':' not in host:
                host = "%s:%d" % (host, unsecure_port)
            else:
                host = host.split(':')[0]
                host = "%s:%d" % (host, unsecure_port)
            content_addr = "http://%s" % (host,)

        self.content_addr = content_addr

    def default_config(self, server_name, **kwargs):
        if ":" in server_name:
            bind_port = int(server_name.split(":")[1])
@@ -142,11 +130,17 @@ class ServerConfig(Config):
        # Whether to serve a web client from the HTTP/HTTPS root resource.
        web_client: True

        # The public-facing base URL for the client API (not including _matrix/...)
        # public_baseurl: https://example.com:8448/

        # Set the soft limit on the number of file descriptors synapse can use
        # Zero is used to indicate synapse should set the soft limit to the
        # hard limit.
        soft_file_limit: 0

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        # gc_thresholds: [700, 10, 10]

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        listeners:
@@ -228,3 +222,20 @@ class ServerConfig(Config):
                  type=int,
                  help="Turn on the twisted telnet manhole"
                       " service on the given port.")


def read_gc_thresholds(thresholds):
    """Reads the three integer thresholds for garbage collection. Ensures that
    the thresholds are integers if thresholds are supplied.
    """
    if thresholds is None:
        return None
    try:
        assert len(thresholds) == 3
        return (
            int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
        )
    except:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )
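The tuple returned by read_gc_thresholds feeds Python's generational collector directly, so the config comment `gc_thresholds: [700, 10, 10]` ends up as:

import gc

thresholds = (700, 10, 10)  # as parsed from `gc_thresholds: [700, 10, 10]`
if thresholds is not None:
    gc.set_threshold(*thresholds)
print(gc.get_threshold())  # -> (700, 10, 10)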
@@ -19,6 +19,9 @@ from OpenSSL import crypto
import subprocess
import os

from hashlib import sha256
from unpaddedbase64 import encode_base64

GENERATE_DH_PARAMS = False


@@ -42,6 +45,19 @@ class TlsConfig(Config):
            config.get("tls_dh_params_path"), "tls_dh_params"
        )

        self.tls_fingerprints = config["tls_fingerprints"]

        # Check that our own certificate is included in the list of fingerprints
        # and include it if it is not.
        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1,
            self.tls_certificate
        )
        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
        if sha256_fingerprint not in sha256_fingerprints:
            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})

        # This config option applies to non-federation HTTP clients
        # (e.g. for talking to recaptcha, identity servers, and such)
        # It should never be used in production, and is intended for
@@ -73,6 +89,28 @@ class TlsConfig(Config):

        # Don't bind to the https port
        no_tls: False

        # List of allowed TLS fingerprints for this server to publish along
        # with the signing keys for this server. Other matrix servers that
        # make HTTPS requests to this server will check that the TLS
        # certificates returned by this server match one of the fingerprints.
        #
        # Synapse automatically adds the fingerprint of its own certificate
        # to the list. So if federation traffic is handled directly by synapse
        # then no modification to the list is required.
        #
        # If synapse is run behind a load balancer that handles the TLS then it
        # will be necessary to add the fingerprints of the certificates used by
        # the load balancers to this list if they are different to the one
        # synapse is using.
        #
        # Homeservers are permitted to cache the list of TLS fingerprints
        # returned in the key responses up to the "valid_until_ts" returned in
        # the key response. It may be necessary to publish the fingerprints of
        # a new certificate and wait until the "valid_until_ts" of the previous
        # key responses have passed before deploying it.
        tls_fingerprints: []
        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """ % locals()

    def read_tls_certificate(self, cert_path):
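An operator terminating TLS on a load balancer can compute a fingerprint in the same format with a few lines; the certificate path here is a placeholder:

from hashlib import sha256

from OpenSSL import crypto
from unpaddedbase64 import encode_base64

# Compute the unpadded-base64 SHA-256 of a PEM certificate's DER form,
# matching the {"sha256": ...} entries expected in tls_fingerprints.
with open("/etc/ssl/lb-cert.pem") as f:
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
print(encode_base64(sha256(der_bytes).digest()))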
31  synapse/config/workers.py  Normal file
@@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
# Copyright 2016 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class WorkerConfig(Config):
    """The workers are processes run separately to the main synapse process.
    They have their own pid_file and listener configuration. They use the
    replication_url to talk to the main synapse process."""

    def read_config(self, config):
        self.worker_app = config.get("worker_app")
        self.worker_listeners = config.get("worker_listeners")
        self.worker_daemonize = config.get("worker_daemonize")
        self.worker_pid_file = config.get("worker_pid_file")
        self.worker_log_file = config.get("worker_log_file")
        self.worker_log_config = config.get("worker_log_config")
        self.worker_replication_url = config.get("worker_replication_url")
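A worker process would then be started against its own YAML file carrying just these keys. A hypothetical pusher worker config (all paths and the replication URL are illustrative assumptions, not values from this diff):

worker_app: synapse.app.pusher
worker_replication_url: http://127.0.0.1:8008/_synapse/replication
worker_pid_file: /var/run/synapse-pusher.pid
worker_log_file: /var/log/synapse-pusher.log
worker_daemonize: true
worker_listeners: []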
@@ -36,6 +36,7 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):

    factory = SynapseKeyClientFactory()
    factory.path = path
    factory.host = server_name
    endpoint = matrix_federation_endpoint(
        reactor, server_name, ssl_context_factory, timeout=30
    )
@@ -76,11 +77,15 @@ class SynapseKeyClientProtocol(HTTPClient):
    def __init__(self):
        self.remote_key = defer.Deferred()
        self.host = None
        self._peer = None

    def connectionMade(self):
        self.host = self.transport.getHost()
        logger.debug("Connected to %s", self.host)
        self._peer = self.transport.getPeer()
        logger.debug("Connected to %s", self._peer)

        self.sendCommand(b"GET", self.path)
        if self.host:
            self.sendHeader(b"Host", self.host)
        self.endHeaders()
        self.timer = reactor.callLater(
            self.timeout,
@@ -121,7 +126,10 @@ class SynapseKeyClientProtocol(HTTPClient):
            self.timer.cancel()

    def on_timeout(self):
        logger.debug("Timeout waiting for response from %s", self.host)
        logger.debug(
            "Timeout waiting for response from %s: %s",
            self.host, self._peer,
        )
        self.errback(IOError("Timeout waiting for response"))
        self.transport.abortConnection()

@@ -130,4 +138,5 @@ class SynapseKeyClientFactory(Factory):
    def protocol(self):
        protocol = SynapseKeyClientProtocol()
        protocol.path = self.path
        protocol.host = self.host
        return protocol
@@ -22,6 +22,7 @@ from synapse.util.logcontext import (
    preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
    preserve_fn
)
from synapse.util.metrics import Measure

from twisted.internet import defer

@@ -44,7 +45,25 @@ import logging
logger = logging.getLogger(__name__)


KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
VerifyKeyRequest = namedtuple("VerifyRequest", (
    "server_name", "key_ids", "json_object", "deferred"
))
"""
A request for a verify key to verify a JSON object.

Attributes:
    server_name(str): The name of the server to verify against.
    key_ids(set(str)): The set of key_ids that could be used to verify the
        JSON object
    json_object(dict): The JSON object to verify.
    deferred(twisted.internet.defer.Deferred):
        A deferred (server_name, key_id, verify_key) tuple that resolves when
        a verify key has been fetched
"""


class KeyLookupError(ValueError):
    pass


class Keyring(object):
@@ -74,39 +93,32 @@ class Keyring(object):
            list of deferreds indicating success or failure to verify each
            json object's signature for the given server_name.
        """
        group_id_to_json = {}
        group_id_to_group = {}
        group_ids = []

        next_group_id = 0
        deferreds = {}
        verify_requests = []

        for server_name, json_object in server_and_json:
            logger.debug("Verifying for %s", server_name)
            group_id = next_group_id
            next_group_id += 1
            group_ids.append(group_id)

            key_ids = signature_ids(json_object, server_name)
            if not key_ids:
                deferreds[group_id] = defer.fail(SynapseError(
                deferred = defer.fail(SynapseError(
                    400,
                    "Not signed with a supported algorithm",
                    Codes.UNAUTHORIZED,
                ))
            else:
                deferreds[group_id] = defer.Deferred()
                deferred = defer.Deferred()

            group = KeyGroup(server_name, group_id, key_ids)
            verify_request = VerifyKeyRequest(
                server_name, key_ids, json_object, deferred
            )

            group_id_to_group[group_id] = group
            group_id_to_json[group_id] = json_object
            verify_requests.append(verify_request)

        @defer.inlineCallbacks
        def handle_key_deferred(group, deferred):
            server_name = group.server_name
        def handle_key_deferred(verify_request):
            server_name = verify_request.server_name
            try:
                _, _, key_id, verify_key = yield deferred
                _, key_id, verify_key = yield verify_request.deferred
            except IOError as e:
                logger.warn(
                    "Got IOError when downloading keys for %s: %s %s",
@@ -128,7 +140,7 @@ class Keyring(object):
                    Codes.UNAUTHORIZED,
                )

            json_object = group_id_to_json[group.group_id]
            json_object = verify_request.json_object

            try:
                verify_signed_json(json_object, server_name, verify_key)
@@ -157,36 +169,34 @@ class Keyring(object):

        # Actually start fetching keys.
        wait_on_deferred.addBoth(
            lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
            lambda _: self.get_server_verify_keys(verify_requests)
        )

        # When we've finished fetching all the keys for a given server_name,
        # resolve the deferred passed to `wait_for_previous_lookups` so that
        # any lookups waiting will proceed.
        server_to_gids = {}
        server_to_request_ids = {}

        def remove_deferreds(res, server_name, group_id):
            server_to_gids[server_name].discard(group_id)
            if not server_to_gids[server_name]:
        def remove_deferreds(res, server_name, verify_request):
            request_id = id(verify_request)
            server_to_request_ids[server_name].discard(request_id)
            if not server_to_request_ids[server_name]:
                d = server_to_deferred.pop(server_name, None)
                if d:
                    d.callback(None)
            return res

        for g_id, deferred in deferreds.items():
            server_name = group_id_to_group[g_id].server_name
            server_to_gids.setdefault(server_name, set()).add(g_id)
            deferred.addBoth(remove_deferreds, server_name, g_id)
        for verify_request in verify_requests:
            server_name = verify_request.server_name
            request_id = id(verify_request)
            server_to_request_ids.setdefault(server_name, set()).add(request_id)
            deferred.addBoth(remove_deferreds, server_name, verify_request)

        # Pass those keys to handle_key_deferred so that the json object
        # signatures can be verified
        return [
            preserve_context_over_fn(
                handle_key_deferred,
                group_id_to_group[g_id],
                deferreds[g_id],
            )
            for g_id in group_ids
            preserve_context_over_fn(handle_key_deferred, verify_request)
            for verify_request in verify_requests
        ]

    @defer.inlineCallbacks
@@ -220,7 +230,7 @@ class Keyring(object):

        d.addBoth(rm, server_name)

    def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
    def get_server_verify_keys(self, verify_requests):
        """Takes a dict of KeyGroups and tries to find at least one key for
        each group.
        """
@@ -234,76 +244,79 @@ class Keyring(object):

        @defer.inlineCallbacks
        def do_iterations():
            merged_results = {}
            with Measure(self.clock, "get_server_verify_keys"):
                merged_results = {}

            missing_keys = {}
            for group in group_id_to_group.values():
                missing_keys.setdefault(group.server_name, set()).update(
                    group.key_ids
                )

            for fn in key_fetch_fns:
                results = yield fn(missing_keys.items())
                merged_results.update(results)

                # We now need to figure out which groups we have keys for
                # and which we don't
                missing_groups = {}
                for group in group_id_to_group.values():
                    for key_id in group.key_ids:
                        if key_id in merged_results[group.server_name]:
                            with PreserveLoggingContext():
                                group_id_to_deferred[group.group_id].callback((
                                    group.group_id,
                                    group.server_name,
                                    key_id,
                                    merged_results[group.server_name][key_id],
                                ))
                            break
                    else:
                        missing_groups.setdefault(
                            group.server_name, []
                        ).append(group)

                if not missing_groups:
                    break

                missing_keys = {
                    server_name: set(
                        key_id for group in groups for key_id in group.key_ids
                missing_keys = {}
                for verify_request in verify_requests:
                    missing_keys.setdefault(verify_request.server_name, set()).update(
                        verify_request.key_ids
                    )
                    for server_name, groups in missing_groups.items()
                }

                for group in missing_groups.values():
                    group_id_to_deferred[group.group_id].errback(SynapseError(
                        401,
                        "No key for %s with id %s" % (
                            group.server_name, group.key_ids,
                        ),
                        Codes.UNAUTHORIZED,
                    ))
                for fn in key_fetch_fns:
                    results = yield fn(missing_keys.items())
                    merged_results.update(results)

                    # We now need to figure out which verify requests we have keys
                    # for and which we don't
                    missing_keys = {}
                    requests_missing_keys = []
                    for verify_request in verify_requests:
                        server_name = verify_request.server_name
                        result_keys = merged_results[server_name]

                        if verify_request.deferred.called:
                            # We've already called this deferred, which probably
                            # means that we've already found a key for it.
                            continue

                        for key_id in verify_request.key_ids:
                            if key_id in result_keys:
                                with PreserveLoggingContext():
                                    verify_request.deferred.callback((
                                        server_name,
                                        key_id,
                                        result_keys[key_id],
                                    ))
                                break
                        else:
                            # The else block is only reached if the loop above
                            # doesn't break.
                            missing_keys.setdefault(server_name, set()).update(
|
||||
verify_request.key_ids
|
||||
)
|
||||
requests_missing_keys.append(verify_request)
|
||||
|
||||
if not missing_keys:
|
||||
break
|
||||
|
||||
for verify_request in requests_missing_keys.values():
|
||||
verify_request.deferred.errback(SynapseError(
|
||||
401,
|
||||
"No key for %s with id %s" % (
|
||||
verify_request.server_name, verify_request.key_ids,
|
||||
),
|
||||
Codes.UNAUTHORIZED,
|
||||
))
|
||||
|
||||
def on_err(err):
|
||||
for deferred in group_id_to_deferred.values():
|
||||
if not deferred.called:
|
||||
deferred.errback(err)
|
||||
for verify_request in verify_requests:
|
||||
if not verify_request.deferred.called:
|
||||
verify_request.deferred.errback(err)
|
||||
|
||||
do_iterations().addErrback(on_err)
|
||||
|
||||
return group_id_to_deferred
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_keys_from_store(self, server_name_and_key_ids):
|
||||
res = yield defer.gatherResults(
|
||||
res = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
self.store.get_server_verify_keys(
|
||||
preserve_fn(self.store.get_server_verify_keys)(
|
||||
server_name, key_ids
|
||||
).addCallback(lambda ks, server: (server, ks), server_name)
|
||||
for server_name, key_ids in server_name_and_key_ids
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
defer.returnValue(dict(res))
|
||||
|
||||
@@ -324,13 +337,13 @@ class Keyring(object):
|
||||
)
|
||||
defer.returnValue({})
|
||||
|
||||
results = yield defer.gatherResults(
|
||||
results = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
get_key(p_name, p_keys)
|
||||
preserve_fn(get_key)(p_name, p_keys)
|
||||
for p_name, p_keys in self.perspective_servers.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
union_of_keys = {}
|
||||
for result in results:
|
||||
@@ -356,7 +369,7 @@ class Keyring(object):
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Unable to getting key %r for %r directly: %s %s",
|
||||
"Unable to get key %r for %r directly: %s %s",
|
||||
key_ids, server_name,
|
||||
type(e).__name__, str(e.message),
|
||||
)
|
||||
@@ -370,13 +383,13 @@ class Keyring(object):
|
||||
|
||||
defer.returnValue(keys)
|
||||
|
||||
results = yield defer.gatherResults(
|
||||
results = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
get_key(server_name, key_ids)
|
||||
preserve_fn(get_key)(server_name, key_ids)
|
||||
for server_name, key_ids in server_name_and_key_ids
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
merged = {}
|
||||
for result in results:
|
||||
@@ -418,7 +431,7 @@ class Keyring(object):
|
||||
for response in responses:
|
||||
if (u"signatures" not in response
|
||||
or perspective_name not in response[u"signatures"]):
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Key response not signed by perspective server"
|
||||
" %r" % (perspective_name,)
|
||||
)
|
||||
@@ -441,21 +454,21 @@ class Keyring(object):
|
||||
list(response[u"signatures"][perspective_name]),
|
||||
list(perspective_keys)
|
||||
)
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Response not signed with a known key for perspective"
|
||||
" server %r" % (perspective_name,)
|
||||
)
|
||||
|
||||
processed_response = yield self.process_v2_response(
|
||||
perspective_name, response
|
||||
perspective_name, response, only_from_server=False
|
||||
)
|
||||
|
||||
for server_name, response_keys in processed_response.items():
|
||||
keys.setdefault(server_name, {}).update(response_keys)
|
||||
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
self.store_keys(
|
||||
preserve_fn(self.store_keys)(
|
||||
server_name=server_name,
|
||||
from_server=perspective_name,
|
||||
verify_keys=response_keys,
|
||||
@@ -463,7 +476,7 @@ class Keyring(object):
|
||||
for server_name, response_keys in keys.items()
|
||||
],
|
||||
consumeErrors=True
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
defer.returnValue(keys)
|
||||
|
||||
@@ -484,10 +497,10 @@ class Keyring(object):
|
||||
|
||||
if (u"signatures" not in response
|
||||
or server_name not in response[u"signatures"]):
|
||||
raise ValueError("Key response not signed by remote server")
|
||||
raise KeyLookupError("Key response not signed by remote server")
|
||||
|
||||
if "tls_fingerprints" not in response:
|
||||
raise ValueError("Key response missing TLS fingerprints")
|
||||
raise KeyLookupError("Key response missing TLS fingerprints")
|
||||
|
||||
certificate_bytes = crypto.dump_certificate(
|
||||
crypto.FILETYPE_ASN1, tls_certificate
|
||||
@@ -501,7 +514,7 @@ class Keyring(object):
|
||||
response_sha256_fingerprints.add(fingerprint[u"sha256"])
|
||||
|
||||
if sha256_fingerprint_b64 not in response_sha256_fingerprints:
|
||||
raise ValueError("TLS certificate not allowed by fingerprints")
|
||||
raise KeyLookupError("TLS certificate not allowed by fingerprints")
|
||||
|
||||
response_keys = yield self.process_v2_response(
|
||||
from_server=server_name,
|
||||
@@ -511,7 +524,7 @@ class Keyring(object):
|
||||
|
||||
keys.update(response_keys)
|
||||
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
preserve_fn(self.store_keys)(
|
||||
server_name=key_server_name,
|
||||
@@ -521,13 +534,13 @@ class Keyring(object):
|
||||
for key_server_name, verify_keys in keys.items()
|
||||
],
|
||||
consumeErrors=True
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
defer.returnValue(keys)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_v2_response(self, from_server, response_json,
|
||||
requested_ids=[]):
|
||||
requested_ids=[], only_from_server=True):
|
||||
time_now_ms = self.clock.time_msec()
|
||||
response_keys = {}
|
||||
verify_keys = {}
|
||||
@@ -551,9 +564,16 @@ class Keyring(object):
|
||||
|
||||
results = {}
|
||||
server_name = response_json["server_name"]
|
||||
if only_from_server:
|
||||
if server_name != from_server:
|
||||
raise KeyLookupError(
|
||||
"Expected a response for server %r not %r" % (
|
||||
from_server, server_name
|
||||
)
|
||||
)
|
||||
for key_id in response_json["signatures"].get(server_name, {}):
|
||||
if key_id not in response_json["verify_keys"]:
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Key response must include verification keys for all"
|
||||
" signatures"
|
||||
)
|
||||
@@ -580,7 +600,7 @@ class Keyring(object):
|
||||
response_keys.update(verify_keys)
|
||||
response_keys.update(old_verify_keys)
|
||||
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
preserve_fn(self.store.store_server_keys_json)(
|
||||
server_name=server_name,
|
||||
@@ -593,7 +613,7 @@ class Keyring(object):
|
||||
for key_id in updated_key_ids
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
results[server_name] = response_keys
|
||||
|
||||
@@ -621,15 +641,15 @@ class Keyring(object):
|
||||
|
||||
if ("signatures" not in response
|
||||
or server_name not in response["signatures"]):
|
||||
raise ValueError("Key response not signed by remote server")
|
||||
raise KeyLookupError("Key response not signed by remote server")
|
||||
|
||||
if "tls_certificate" not in response:
|
||||
raise ValueError("Key response missing TLS certificate")
|
||||
raise KeyLookupError("Key response missing TLS certificate")
|
||||
|
||||
tls_certificate_b64 = response["tls_certificate"]
|
||||
|
||||
if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
|
||||
raise ValueError("TLS certificate doesn't match")
|
||||
raise KeyLookupError("TLS certificate doesn't match")
|
||||
|
||||
# Cache the result in the datastore.
|
||||
|
||||
@@ -645,7 +665,7 @@ class Keyring(object):
|
||||
|
||||
for key_id in response["signatures"][server_name]:
|
||||
if key_id not in response["verify_keys"]:
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Key response must include verification keys for all"
|
||||
" signatures"
|
||||
)
|
||||
@@ -682,7 +702,7 @@ class Keyring(object):
|
||||
A deferred that completes when the keys are stored.
|
||||
"""
|
||||
# TODO(markjh): Store whether the keys have expired.
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
preserve_fn(self.store.store_server_verify_key)(
|
||||
server_name, server_name, key.time_added, key
|
||||
@@ -690,4 +710,4 @@ class Keyring(object):
|
||||
for key_id, key in verify_keys.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
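The refactor above replaces the numeric KeyGroup bookkeeping with a single VerifyKeyRequest object carried end-to-end through verification. A minimal sketch of what such a named tuple might look like, assuming only the four fields the diff actually uses (the real definition lives elsewhere in keyring.py):

    from collections import namedtuple

    # One entry per JSON object awaiting signature verification. The deferred
    # fires with (server_name, key_id, verify_key) once a usable key arrives.
    VerifyKeyRequest = namedtuple("VerifyKeyRequest", (
        "server_name",   # server whose signature we are checking
        "key_ids",       # acceptable signing key ids, in preference order
        "json_object",   # the signed JSON to verify
        "deferred",      # resolved/errbacked when lookup succeeds or fails
    ))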
@@ -14,6 +14,7 @@
 # limitations under the License.

 from synapse.util.frozenutils import freeze
+from synapse.util.caches import intern_dict


 # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
@@ -30,7 +31,10 @@ class _EventInternalMetadata(object):
         return dict(self.__dict__)

     def is_outlier(self):
-        return hasattr(self, "outlier") and self.outlier
+        return getattr(self, "outlier", False)
+
+    def is_invite_from_remote(self):
+        return getattr(self, "invite_from_remote", False)


 def _event_dict_property(key):
@@ -95,7 +99,7 @@ class EventBase(object):

         return d

-    def get(self, key, default):
+    def get(self, key, default=None):
         return self._event_dict.get(key, default)

     def get_internal_metadata_dict(self):
@@ -140,6 +144,10 @@ class FrozenEvent(EventBase):

         unsigned = dict(event_dict.pop("unsigned", {}))

+        # We intern these strings because they turn up a lot (especially when
+        # caching).
+        event_dict = intern_dict(event_dict)
+
         if USE_FROZEN_DICTS:
             frozen_dict = freeze(event_dict)
         else:
@@ -168,5 +176,7 @@ class FrozenEvent(EventBase):

     def __repr__(self):
         return "<FrozenEvent event_id='%s', type='%s', state_key='%s'>" % (
-            self.event_id, self.type, self.get("state_key", None),
+            self.get("event_id", None),
+            self.get("type", None),
+            self.get("state_key", None),
         )
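Both the is_outlier change and the __repr__ change lean on the same idiom: prefer a defaulted lookup over an existence check followed by access, so missing fields degrade to a default instead of raising. A quick illustration of the equivalence assumed by the is_outlier rewrite:

    class _Meta(object):
        pass

    m = _Meta()
    # Missing attribute: both forms yield False.
    assert (hasattr(m, "outlier") and m.outlier) == getattr(m, "outlier", False)

    m.outlier = True
    # Present attribute: both forms yield the stored value.
    assert (hasattr(m, "outlier") and m.outlier) == getattr(m, "outlier", False)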
@@ -15,9 +15,30 @@


 class EventContext(object):
+    __slots__ = [
+        "current_state_ids",
+        "prev_state_ids",
+        "state_group",
+        "rejected",
+        "push_actions",
+        "prev_group",
+        "delta_ids",
+        "prev_state_events",
+    ]
+
-    def __init__(self, current_state=None):
-        self.current_state = current_state
+    def __init__(self):
+        # The current state including the current event
+        self.current_state_ids = None
+        # The current state excluding the current event
+        self.prev_state_ids = None
+        self.state_group = None
+
+        self.rejected = False
+        self.push_actions = []
+
+        # A previously persisted state group and a delta between that
+        # and this state.
+        self.prev_group = None
+        self.delta_ids = None
+
+        self.prev_state_events = None
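The new __slots__ declaration fixes the attribute set per instance: EventContext objects no longer carry a per-instance __dict__, which saves memory for a class created once per event, and any typo'd attribute fails loudly instead of silently creating a new field. An illustrative sketch of both effects:

    class WithSlots(object):
        __slots__ = ["state_group", "rejected"]

        def __init__(self):
            self.state_group = None
            self.rejected = False

    ctx = WithSlots()
    ctx.rejected = True          # fine: a declared slot
    try:
        ctx.rejectd = True       # typo: there is no __dict__ to absorb it
    except AttributeError:
        print("typo caught at assignment time")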
@@ -88,6 +88,8 @@ def prune_event(event):

     if "age_ts" in event.unsigned:
         allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
+    if "replaces_state" in event.unsigned:
+        allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]

     return type(event)(
         allowed_fields,
@@ -23,6 +23,7 @@ from synapse.crypto.event_signing import check_event_content_hash
 from synapse.api.errors import SynapseError

 from synapse.util import unwrapFirstError
+from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred

 import logging

@@ -31,6 +32,9 @@ logger = logging.getLogger(__name__)


 class FederationBase(object):
+    def __init__(self, hs):
+        pass
+
     @defer.inlineCallbacks
     def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
                                        include_none=False):
@@ -99,10 +103,10 @@ class FederationBase(object):
                 warn, pdu
             )

-        valid_pdus = yield defer.gatherResults(
+        valid_pdus = yield preserve_context_over_deferred(defer.gatherResults(
             deferreds,
             consumeErrors=True
-        ).addErrback(unwrapFirstError)
+        )).addErrback(unwrapFirstError)

         if include_none:
             defer.returnValue(valid_pdus)
@@ -126,7 +130,7 @@ class FederationBase(object):
             for pdu in pdus
         ]

-        deferreds = self.keyring.verify_json_objects_for_server([
+        deferreds = preserve_fn(self.keyring.verify_json_objects_for_server)([
             (p.origin, p.get_pdu_json())
             for p in redacted_pdus
         ])
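Most hunks in this comparison repeat one pattern: every fan-out of deferreds gets its callables wrapped in preserve_fn and the combined deferred wrapped in preserve_context_over_deferred, so the Twisted logging context active at the call site is restored when results come back. A schematic sketch of the pattern as the diff itself uses it, assuming the synapse.util.logcontext helpers behave as their names suggest:

    from twisted.internet import defer

    from synapse.util import unwrapFirstError
    from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred

    @defer.inlineCallbacks
    def fetch_all(fetch_one, items):
        # preserve_fn: start each call in the current log context, then detach.
        deferreds = [preserve_fn(fetch_one)(item) for item in items]
        # preserve_context_over_deferred: reattach our context on completion.
        results = yield preserve_context_over_deferred(
            defer.gatherResults(deferreds, consumeErrors=True)
        ).addErrback(unwrapFirstError)
        defer.returnValue(results)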
@@ -26,7 +26,9 @@ from synapse.api.errors import (
 from synapse.util import unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.logutils import log_function
+from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
 from synapse.events import FrozenEvent
+from synapse.types import get_domain_from_id
 import synapse.metrics

 from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
@@ -50,7 +52,34 @@ sent_edus_counter = metrics.register_counter("sent_edus")
 sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


+PDU_RETRY_TIME_MS = 1 * 60 * 1000
+
+
 class FederationClient(FederationBase):
+    def __init__(self, hs):
+        super(FederationClient, self).__init__(hs)
+
+        self.pdu_destination_tried = {}
+        self._clock.looping_call(
+            self._clear_tried_cache, 60 * 1000,
+        )
+        self.state = hs.get_state_handler()
+
+    def _clear_tried_cache(self):
+        """Clear pdu_destination_tried cache"""
+        now = self._clock.time_msec()
+
+        old_dict = self.pdu_destination_tried
+        self.pdu_destination_tried = {}
+
+        for event_id, destination_dict in old_dict.items():
+            destination_dict = {
+                dest: time
+                for dest, time in destination_dict.items()
+                if time + PDU_RETRY_TIME_MS > now
+            }
+            if destination_dict:
+                self.pdu_destination_tried[event_id] = destination_dict
+
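The _clear_tried_cache helper added above sweeps per-destination retry timestamps once a minute, dropping entries older than PDU_RETRY_TIME_MS so the cache cannot grow without bound. A standalone sketch of the same expiry logic, with a plain dict and an integer clock standing in for Synapse's internals:

    PDU_RETRY_TIME_MS = 1 * 60 * 1000

    def clear_tried_cache(tried, now_ms):
        """Drop (event_id -> {dest: last_attempt_ms}) entries past the window."""
        fresh = {}
        for event_id, by_dest in tried.items():
            by_dest = {
                dest: ts for dest, ts in by_dest.items()
                if ts + PDU_RETRY_TIME_MS > now_ms
            }
            if by_dest:
                fresh[event_id] = by_dest
        return fresh

    tried = {"$ev1": {"remote.example": 0}, "$ev2": {"remote.example": 90000}}
    assert clear_tried_cache(tried, 100000) == {"$ev2": {"remote.example": 90000}}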
     def start_get_pdu_cache(self):
         self._get_pdu_cache = ExpiringCache(
@@ -92,8 +121,12 @@ class FederationClient(FederationBase):
             pdu.event_id
         )

+    def send_presence(self, destination, states):
+        if destination != self.server_name:
+            self._transaction_queue.enqueue_presence(destination, states)
+
     @log_function
-    def send_edu(self, destination, edu_type, content):
+    def send_edu(self, destination, edu_type, content, key=None):
         edu = Edu(
             origin=self.server_name,
             destination=destination,
@@ -103,9 +136,13 @@ class FederationClient(FederationBase):

         sent_edus_counter.inc()

-        # TODO, add errback, etc.
-        self._transaction_queue.enqueue_edu(edu)
-        return defer.succeed(None)
+        self._transaction_queue.enqueue_edu(edu, key=key)
+
+    @log_function
+    def send_device_messages(self, destination):
+        """Sends the device messages in the local database to the remote
+        destination"""
+        self._transaction_queue.enqueue_device_messages(destination)

     @log_function
     def send_failure(self, failure, destination):
@@ -114,7 +151,7 @@ class FederationClient(FederationBase):

     @log_function
     def make_query(self, destination, query_type, args,
-                   retry_on_dns_fail=True):
+                   retry_on_dns_fail=False):
         """Sends a federation Query to a remote homeserver of the given type
         and arguments.

@@ -136,7 +173,7 @@ class FederationClient(FederationBase):
         )

     @log_function
-    def query_client_keys(self, destination, content):
+    def query_client_keys(self, destination, content, timeout):
         """Query device keys for a device hosted on a remote server.

         Args:
@@ -148,10 +185,12 @@ class FederationClient(FederationBase):
             response
         """
         sent_queries_counter.inc("client_device_keys")
-        return self.transport_layer.query_client_keys(destination, content)
+        return self.transport_layer.query_client_keys(
+            destination, content, timeout
+        )

     @log_function
-    def claim_client_keys(self, destination, content):
+    def claim_client_keys(self, destination, content, timeout):
         """Claims one-time keys for a device hosted on a remote server.

         Args:
@@ -163,7 +202,9 @@ class FederationClient(FederationBase):
             response
         """
         sent_queries_counter.inc("client_one_time_keys")
-        return self.transport_layer.claim_client_keys(destination, content)
+        return self.transport_layer.claim_client_keys(
+            destination, content, timeout
+        )

     @defer.inlineCallbacks
     @log_function
@@ -198,10 +239,10 @@ class FederationClient(FederationBase):
         ]

         # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = yield defer.gatherResults(
+        pdus[:] = yield preserve_context_over_deferred(defer.gatherResults(
             self._check_sigs_and_hashes(pdus),
             consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        )).addErrback(unwrapFirstError)

         defer.returnValue(pdus)

@@ -233,12 +274,19 @@ class FederationClient(FederationBase):
         # TODO: Rate limit the number of times we try and get the same event.

         if self._get_pdu_cache:
-            e = self._get_pdu_cache.get(event_id)
-            if e:
-                defer.returnValue(e)
+            ev = self._get_pdu_cache.get(event_id)
+            if ev:
+                defer.returnValue(ev)

-        pdu = None
+        pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
+
+        signed_pdu = None
         for destination in destinations:
+            now = self._clock.time_msec()
+            last_attempt = pdu_attempts.get(destination, 0)
+            if last_attempt + PDU_RETRY_TIME_MS > now:
+                continue
+
             try:
                 limiter = yield get_retry_limiter(
                     destination,
@@ -262,39 +310,33 @@ class FederationClient(FederationBase):
                 pdu = pdu_list[0]

                 # Check signatures are correct.
-                pdu = yield self._check_sigs_and_hashes([pdu])[0]
+                signed_pdu = yield self._check_sigs_and_hashes([pdu])[0]

                 break

-            except SynapseError:
-                logger.info(
-                    "Failed to get PDU %s from %s because %s",
-                    event_id, destination, e,
-                )
-                continue
-            except CodeMessageException as e:
-                if 400 <= e.code < 500:
-                    raise
+                pdu_attempts[destination] = now

+            except SynapseError as e:
+                logger.info(
+                    "Failed to get PDU %s from %s because %s",
+                    event_id, destination, e,
+                )
+                continue
+            except NotRetryingDestination as e:
+                logger.info(e.message)
+                continue
+            except Exception as e:
+                pdu_attempts[destination] = now

                 logger.info(
                     "Failed to get PDU %s from %s because %s",
                     event_id, destination, e,
                 )
                 continue

-        if self._get_pdu_cache is not None and pdu:
-            self._get_pdu_cache[event_id] = pdu
+        if self._get_pdu_cache is not None and signed_pdu:
+            self._get_pdu_cache[event_id] = signed_pdu

-        defer.returnValue(pdu)
+        defer.returnValue(signed_pdu)

     @defer.inlineCallbacks
     @log_function
@@ -311,6 +353,42 @@ class FederationClient(FederationBase):
             Deferred: Results in a list of PDUs.
         """

+        try:
+            # First we try and ask for just the IDs, as thats far quicker if
+            # we have most of the state and auth_chain already.
+            # However, this may 404 if the other side has an old synapse.
+            result = yield self.transport_layer.get_room_state_ids(
+                destination, room_id, event_id=event_id,
+            )
+
+            state_event_ids = result["pdu_ids"]
+            auth_event_ids = result.get("auth_chain_ids", [])
+
+            fetched_events, failed_to_fetch = yield self.get_events(
+                [destination], room_id, set(state_event_ids + auth_event_ids)
+            )
+
+            if failed_to_fetch:
+                logger.warn("Failed to get %r", failed_to_fetch)
+
+            event_map = {
+                ev.event_id: ev for ev in fetched_events
+            }
+
+            pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
+            auth_chain = [
+                event_map[e_id] for e_id in auth_event_ids if e_id in event_map
+            ]
+
+            auth_chain.sort(key=lambda e: e.depth)
+
+            defer.returnValue((pdus, auth_chain))
+        except HttpResponseException as e:
+            if e.code == 400 or e.code == 404:
+                logger.info("Failed to use get_room_state_ids API, falling back")
+            else:
+                raise e
+
         result = yield self.transport_layer.get_room_state(
             destination, room_id, event_id=event_id,
         )
@@ -324,18 +402,95 @@ class FederationClient(FederationBase):
             for p in result.get("auth_chain", [])
         ]

+        seen_events = yield self.store.get_events([
+            ev.event_id for ev in itertools.chain(pdus, auth_chain)
+        ])
+
         signed_pdus = yield self._check_sigs_and_hash_and_fetch(
-            destination, pdus, outlier=True
+            destination,
+            [p for p in pdus if p.event_id not in seen_events],
+            outlier=True
         )
+        signed_pdus.extend(
+            seen_events[p.event_id] for p in pdus if p.event_id in seen_events
+        )

         signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            destination, auth_chain, outlier=True
+            destination,
+            [p for p in auth_chain if p.event_id not in seen_events],
+            outlier=True
         )
+        signed_auth.extend(
+            seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
+        )

         signed_auth.sort(key=lambda e: e.depth)

         defer.returnValue((signed_pdus, signed_auth))

+    @defer.inlineCallbacks
+    def get_events(self, destinations, room_id, event_ids, return_local=True):
+        """Fetch events from some remote destinations, checking if we already
+        have them.
+
+        Args:
+            destinations (list)
+            room_id (str)
+            event_ids (list)
+            return_local (bool): Whether to include events we already have in
+                the DB in the returned list of events
+
+        Returns:
+            Deferred: A deferred resolving to a 2-tuple where the first is a list of
+            events and the second is a list of event ids that we failed to fetch.
+        """
+        if return_local:
+            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
+            signed_events = seen_events.values()
+        else:
+            seen_events = yield self.store.have_events(event_ids)
+            signed_events = []
+
+        failed_to_fetch = set()
+
+        missing_events = set(event_ids)
+        for k in seen_events:
+            missing_events.discard(k)
+
+        if not missing_events:
+            defer.returnValue((signed_events, failed_to_fetch))
+
+        def random_server_list():
+            srvs = list(destinations)
+            random.shuffle(srvs)
+            return srvs
+
+        batch_size = 20
+        missing_events = list(missing_events)
+        for i in xrange(0, len(missing_events), batch_size):
+            batch = set(missing_events[i:i + batch_size])
+
+            deferreds = [
+                preserve_fn(self.get_pdu)(
+                    destinations=random_server_list(),
+                    event_id=e_id,
+                )
+                for e_id in batch
+            ]
+
+            res = yield preserve_context_over_deferred(
+                defer.DeferredList(deferreds, consumeErrors=True)
+            )
+            for success, result in res:
+                if success and result:
+                    signed_events.append(result)
+                    batch.discard(result.event_id)
+
+            # We removed all events we successfully fetched from `batch`
+            failed_to_fetch.update(batch)
+
+        defer.returnValue((signed_events, failed_to_fetch))
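The new get_events above pulls missing events in fixed-size batches, tries servers in random order for each one, and records whatever ids never arrive. A condensed sketch of the same batch-and-collect control flow, with synchronous stand-ins for the deferred machinery:

    import random

    def fetch_events(destinations, event_ids, fetch_one, batch_size=20):
        """fetch_one(servers, event_id) -> event or None; mirrors get_pdu."""
        fetched, failed = [], set()
        missing = list(event_ids)
        for i in range(0, len(missing), batch_size):
            batch = set(missing[i:i + batch_size])
            for e_id in list(batch):
                servers = list(destinations)
                random.shuffle(servers)      # spread load across servers
                ev = fetch_one(servers, e_id)
                if ev is not None:
                    fetched.append(ev)
                    batch.discard(e_id)
            failed.update(batch)             # whatever is left never arrived
        return fetched, failed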
     @defer.inlineCallbacks
     @log_function
     def get_event_auth(self, destination, room_id, event_id):
@@ -411,8 +566,14 @@ class FederationClient(FederationBase):
                     (destination, self.event_from_pdu_json(pdu_dict))
                 )
                 break
-            except CodeMessageException:
-                raise
+            except CodeMessageException as e:
+                if not 500 <= e.code < 600:
+                    raise
+                else:
+                    logger.warn(
+                        "Failed to make_%s via %s: %s",
+                        membership, destination, e.message
+                    )
             except Exception as e:
                 logger.warn(
                     "Failed to make_%s via %s: %s",
@@ -489,8 +650,14 @@ class FederationClient(FederationBase):
                     "auth_chain": signed_auth,
                     "origin": destination,
                 })
-            except CodeMessageException:
-                raise
+            except CodeMessageException as e:
+                if not 500 <= e.code < 600:
+                    raise
+                else:
+                    logger.exception(
+                        "Failed to send_join via %s: %s",
+                        destination, e.message
+                    )
             except Exception as e:
                 logger.exception(
                     "Failed to send_join via %s: %s",
@@ -549,6 +716,15 @@ class FederationClient(FederationBase):

         raise RuntimeError("Failed to send to any server.")

+    def get_public_rooms(self, destination, limit=None, since_token=None,
+                         search_filter=None):
+        if destination == self.server_name:
+            return
+
+        return self.transport_layer.get_public_rooms(
+            destination, limit, since_token, search_filter
+        )
+
     @defer.inlineCallbacks
     def query_auth(self, destination, room_id, event_id, local_auth):
         """
@@ -638,7 +814,8 @@ class FederationClient(FederationBase):
         if len(signed_events) >= limit:
             defer.returnValue(signed_events)

-        servers = yield self.store.get_joined_hosts_for_room(room_id)
+        users = yield self.state.get_current_user_in_room(room_id)
+        servers = set(get_domain_from_id(u) for u in users)

         servers = set(servers)
         servers.discard(self.server_name)
@@ -683,14 +860,16 @@ class FederationClient(FederationBase):
             return srvs

         deferreds = [
-            self.get_pdu(
+            preserve_fn(self.get_pdu)(
                 destinations=random_server_list(),
                 event_id=e_id,
             )
             for e_id, depth in ordered_missing[:limit - len(signed_events)]
         ]

-        res = yield defer.DeferredList(deferreds, consumeErrors=True)
+        res = yield preserve_context_over_deferred(
+            defer.DeferredList(deferreds, consumeErrors=True)
+        )
         for (result, val), (e_id, _) in zip(res, ordered_missing):
             if result and val:
                 signed_events.append(val)
@@ -19,11 +19,13 @@ from twisted.internet import defer
 from .federation_base import FederationBase
 from .units import Transaction, Edu

+from synapse.util.async import Linearizer
 from synapse.util.logutils import log_function
+from synapse.util.caches.response_cache import ResponseCache
 from synapse.events import FrozenEvent
 import synapse.metrics

-from synapse.api.errors import FederationError, SynapseError
+from synapse.api.errors import AuthError, FederationError, SynapseError

 from synapse.crypto.event_signing import compute_event_signature

@@ -44,6 +46,18 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[


 class FederationServer(FederationBase):
+    def __init__(self, hs):
+        super(FederationServer, self).__init__(hs)
+
+        self.auth = hs.get_auth()
+
+        self._room_pdu_linearizer = Linearizer()
+        self._server_linearizer = Linearizer()
+
+        # We cache responses to state queries, as they take a while and often
+        # come in waves.
+        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)
+
     def set_handler(self, handler):
         """Sets the handler that the replication layer will use to communicate
         receipt of new PDUs from other home servers. The required methods are
@@ -83,11 +97,14 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_backfill_request(self, origin, room_id, versions, limit):
-        pdus = yield self.handler.on_backfill_request(
-            origin, room_id, versions, limit
-        )
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            pdus = yield self.handler.on_backfill_request(
+                origin, room_id, versions, limit
+            )

-        defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
+            res = self._transaction_from_pdus(pdus).get_dict()
+
+        defer.returnValue((200, res))

     @defer.inlineCallbacks
     @log_function
@@ -137,8 +154,8 @@ class FederationServer(FederationBase):
                 logger.exception("Failed to handle PDU")

         if hasattr(transaction, "edus"):
-            for edu in [Edu(**x) for x in transaction.edus]:
-                self.received_edu(
+            for edu in (Edu(**x) for x in transaction.edus):
+                yield self.received_edu(
                     transaction.origin,
                     edu.edu_type,
                     edu.content
@@ -161,26 +178,74 @@ class FederationServer(FederationBase):
         )
         defer.returnValue((200, response))

+    @defer.inlineCallbacks
     def received_edu(self, origin, edu_type, content):
         received_edus_counter.inc()

         if edu_type in self.edu_handlers:
-            self.edu_handlers[edu_type](origin, content)
+            try:
+                yield self.edu_handlers[edu_type](origin, content)
+            except SynapseError as e:
+                logger.info("Failed to handle edu %r: %r", edu_type, e)
+            except Exception as e:
+                logger.exception("Failed to handle edu %r", edu_type)
         else:
             logger.warn("Received EDU of type %s with no handler", edu_type)

     @defer.inlineCallbacks
     @log_function
     def on_context_state_request(self, origin, room_id, event_id):
-        if event_id:
-            pdus = yield self.handler.get_state_for_pdu(
-                origin, room_id, event_id,
-            )
-            auth_chain = yield self.store.get_auth_chain(
-                [pdu.event_id for pdu in pdus]
-            )
+        if not event_id:
+            raise NotImplementedError("Specify an event")

-            for event in auth_chain:
+        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        if not in_room:
+            raise AuthError(403, "Host not in room.")
+
+        result = self._state_resp_cache.get((room_id, event_id))
+        if not result:
+            with (yield self._server_linearizer.queue((origin, room_id))):
+                resp = yield self._state_resp_cache.set(
+                    (room_id, event_id),
+                    self._on_context_state_request_compute(room_id, event_id)
+                )
+        else:
+            resp = yield result
+
+        defer.returnValue((200, resp))
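The ResponseCache pattern above deduplicates concurrent state requests: the first caller computes the response and registers it under a key, and later callers arriving within the cache window simply wait on the same pending result instead of recomputing. A toy sketch of that get-or-compute contract, using plain dicts and callables where the real class uses deferreds and a timeout:

    class ResponseCacheSketch(object):
        """get() returns an in-flight/recent result; set() registers one."""

        def __init__(self):
            self._pending = {}

        def get(self, key):
            return self._pending.get(key)

        def set(self, key, compute):
            result = compute()           # stand-in for a deferred computation
            self._pending[key] = result  # the real cache evicts after timeout_ms
            return result

    cache = ResponseCacheSketch()
    result = cache.get(("!room", "$event"))
    if result is None:
        result = cache.set(("!room", "$event"), lambda: {"pdus": []})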
+    @defer.inlineCallbacks
+    def on_state_ids_request(self, origin, room_id, event_id):
+        if not event_id:
+            raise NotImplementedError("Specify an event")
+
+        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        if not in_room:
+            raise AuthError(403, "Host not in room.")
+
+        state_ids = yield self.handler.get_state_ids_for_pdu(
+            room_id, event_id,
+        )
+        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
+
+        defer.returnValue((200, {
+            "pdu_ids": state_ids,
+            "auth_chain_ids": auth_chain_ids,
+        }))
+
+    @defer.inlineCallbacks
+    def _on_context_state_request_compute(self, room_id, event_id):
+        pdus = yield self.handler.get_state_for_pdu(
+            room_id, event_id,
+        )
+        auth_chain = yield self.store.get_auth_chain(
+            [pdu.event_id for pdu in pdus]
+        )

         for event in auth_chain:
             # We sign these again because there was a bug where we
             # incorrectly signed things the first time round
             if self.hs.is_mine_id(event.event_id):
                 event.signatures.update(
                     compute_event_signature(
                         event,
@@ -188,13 +253,11 @@ class FederationServer(FederationBase):
                         self.hs.config.signing_key[0]
                     )
                 )
-        else:
-            raise NotImplementedError("Specify an event")

-        defer.returnValue((200, {
+        defer.returnValue({
             "pdus": [pdu.get_pdu_json() for pdu in pdus],
             "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
-        }))
+        })

     @defer.inlineCallbacks
     @log_function
@@ -268,14 +331,16 @@ class FederationServer(FederationBase):

     @defer.inlineCallbacks
     def on_event_auth(self, origin, room_id, event_id):
-        time_now = self._clock.time_msec()
-        auth_pdus = yield self.handler.on_event_auth(event_id)
-        defer.returnValue((200, {
-            "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
-        }))
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            time_now = self._clock.time_msec()
+            auth_pdus = yield self.handler.on_event_auth(event_id)
+            res = {
+                "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
+            }
+        defer.returnValue((200, res))

     @defer.inlineCallbacks
-    def on_query_auth_request(self, origin, content, event_id):
+    def on_query_auth_request(self, origin, content, room_id, event_id):
         """
         Content is a dict with keys::
             auth_chain (list): A list of events that give the auth chain.
@@ -294,58 +359,41 @@ class FederationServer(FederationBase):
         Returns:
             Deferred: Results in `dict` with the same format as `content`
         """
-        auth_chain = [
-            self.event_from_pdu_json(e)
-            for e in content["auth_chain"]
-        ]
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            auth_chain = [
+                self.event_from_pdu_json(e)
+                for e in content["auth_chain"]
+            ]

-        signed_auth = yield self._check_sigs_and_hash_and_fetch(
-            origin, auth_chain, outlier=True
-        )
+            signed_auth = yield self._check_sigs_and_hash_and_fetch(
+                origin, auth_chain, outlier=True
+            )

-        ret = yield self.handler.on_query_auth(
-            origin,
-            event_id,
-            signed_auth,
-            content.get("rejects", []),
-            content.get("missing", []),
-        )
+            ret = yield self.handler.on_query_auth(
+                origin,
+                event_id,
+                signed_auth,
+                content.get("rejects", []),
+                content.get("missing", []),
+            )

-        time_now = self._clock.time_msec()
-        send_content = {
-            "auth_chain": [
-                e.get_pdu_json(time_now)
-                for e in ret["auth_chain"]
-            ],
-            "rejects": ret.get("rejects", []),
-            "missing": ret.get("missing", []),
-        }
+            time_now = self._clock.time_msec()
+            send_content = {
+                "auth_chain": [
+                    e.get_pdu_json(time_now)
+                    for e in ret["auth_chain"]
+                ],
+                "rejects": ret.get("rejects", []),
+                "missing": ret.get("missing", []),
+            }

         defer.returnValue(
             (200, send_content)
         )

-    @defer.inlineCallbacks
     @log_function
     def on_query_client_keys(self, origin, content):
-        query = []
-        for user_id, device_ids in content.get("device_keys", {}).items():
-            if not device_ids:
-                query.append((user_id, None))
-            else:
-                for device_id in device_ids:
-                    query.append((user_id, device_id))
-
-        results = yield self.store.get_e2e_device_keys(query)
-
-        json_result = {}
-        for user_id, device_keys in results.items():
-            for device_id, json_bytes in device_keys.items():
-                json_result.setdefault(user_id, {})[device_id] = json.loads(
-                    json_bytes
-                )
-
-        defer.returnValue({"device_keys": json_result})
+        return self.on_query_request("client_keys", content)

     @defer.inlineCallbacks
     @log_function
@@ -371,16 +419,34 @@ class FederationServer(FederationBase):
     @log_function
     def on_get_missing_events(self, origin, room_id, earliest_events,
                               latest_events, limit, min_depth):
-        missing_events = yield self.handler.on_get_missing_events(
-            origin, room_id, earliest_events, latest_events, limit, min_depth
-        )
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            logger.info(
+                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
+                " limit: %d, min_depth: %d",
+                earliest_events, latest_events, limit, min_depth
+            )
+            missing_events = yield self.handler.on_get_missing_events(
+                origin, room_id, earliest_events, latest_events, limit, min_depth
+            )

-        time_now = self._clock.time_msec()
+            if len(missing_events) < 5:
+                logger.info(
+                    "Returning %d events: %r", len(missing_events), missing_events
+                )
+            else:
+                logger.info("Returning %d events", len(missing_events))
+
+            time_now = self._clock.time_msec()

         defer.returnValue({
             "events": [ev.get_pdu_json(time_now) for ev in missing_events],
         })

+    @log_function
+    def on_openid_userinfo(self, token):
+        ts_now_ms = self._clock.time_msec()
+        return self.store.get_user_id_for_open_id_token(token, ts_now_ms)
+
     @log_function
     def _get_persisted_pdu(self, origin, event_id, do_auth=True):
         """ Get a PDU from the database with given origin and id.
@@ -470,42 +536,59 @@ class FederationServer(FederationBase):
             pdu.internal_metadata.outlier = True
         elif min_depth and pdu.depth > min_depth:
             if get_missing and prevs - seen:
-                latest = yield self.store.get_latest_event_ids_in_room(
-                    pdu.room_id
-                )
+                # If we're missing stuff, ensure we only fetch stuff one
+                # at a time.
+                with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
+                    # We recalculate seen, since it may have changed.
+                    have_seen = yield self.store.have_events(prevs)
+                    seen = set(have_seen.keys())

-                # We add the prev events that we have seen to the latest
-                # list to ensure the remote server doesn't give them to us
-                latest = set(latest)
-                latest |= seen
+                    if prevs - seen:
+                        latest = yield self.store.get_latest_event_ids_in_room(
+                            pdu.room_id
+                        )

-                missing_events = yield self.get_missing_events(
-                    origin,
-                    pdu.room_id,
-                    earliest_events_ids=list(latest),
-                    latest_events=[pdu],
-                    limit=10,
-                    min_depth=min_depth,
-                )
+                        # We add the prev events that we have seen to the latest
+                        # list to ensure the remote server doesn't give them to us
+                        latest = set(latest)
+                        latest |= seen

-                # We want to sort these by depth so we process them and
-                # tell clients about them in order.
-                missing_events.sort(key=lambda x: x.depth)
+                        logger.info(
+                            "Missing %d events for room %r: %r...",
+                            len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+                        )

-                for e in missing_events:
-                    yield self._handle_new_pdu(
-                        origin,
-                        e,
-                        get_missing=False
-                    )
+                        missing_events = yield self.get_missing_events(
+                            origin,
+                            pdu.room_id,
+                            earliest_events_ids=list(latest),
+                            latest_events=[pdu],
+                            limit=10,
+                            min_depth=min_depth,
+                        )

-        have_seen = yield self.store.have_events(
-            [ev for ev, _ in pdu.prev_events]
-        )
+                        # We want to sort these by depth so we process them and
+                        # tell clients about them in order.
+                        missing_events.sort(key=lambda x: x.depth)
+
+                        for e in missing_events:
+                            yield self._handle_new_pdu(
+                                origin,
+                                e,
+                                get_missing=False
+                            )
+
+        have_seen = yield self.store.have_events(
+            [ev for ev, _ in pdu.prev_events]
+        )
+
+        prevs = {e_id for e_id, _ in pdu.prev_events}
+        seen = set(have_seen.keys())
+        if prevs - seen:
+            logger.info(
+                "Still missing %d events for room %r: %r...",
+                len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+            )
+            fetch_state = True

         if fetch_state:
@@ -520,12 +603,11 @@ class FederationServer(FederationBase):
                     origin, pdu.room_id, pdu.event_id,
                 )
             except:
-                logger.warn("Failed to get state for event: %s", pdu.event_id)
+                logger.exception("Failed to get state for event: %s", pdu.event_id)

         yield self.handler.on_receive_pdu(
             origin,
             pdu,
-            backfilled=False,
             state=state,
             auth_chain=auth_chain,
         )
@@ -543,8 +625,19 @@ class FederationServer(FederationBase):
         return event

     @defer.inlineCallbacks
-    def exchange_third_party_invite(self, invite):
-        ret = yield self.handler.exchange_third_party_invite(invite)
+    def exchange_third_party_invite(
+            self,
+            sender_user_id,
+            target_user_id,
+            room_id,
+            signed,
+    ):
+        ret = yield self.handler.exchange_third_party_invite(
+            sender_user_id,
+            target_user_id,
+            room_id,
+            signed,
+        )
         defer.returnValue(ret)

     @defer.inlineCallbacks
@@ -72,5 +72,7 @@ class ReplicationLayer(FederationClient, FederationServer):

         self.hs = hs

+        super(ReplicationLayer, self).__init__(hs)
+
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name
@@ -17,14 +17,16 @@
 from twisted.internet import defer

 from .persistence import TransactionActions
-from .units import Transaction
+from .units import Transaction, Edu

 from synapse.api.errors import HttpResponseException
 from synapse.util.logutils import log_function
-from synapse.util.logcontext import PreserveLoggingContext
 from synapse.util.async import run_on_reactor
+from synapse.util.logcontext import preserve_context_over_fn
 from synapse.util.retryutils import (
     get_retry_limiter, NotRetryingDestination,
 )
+from synapse.util.metrics import measure_func
+from synapse.handlers.presence import format_user_presence_state
 import synapse.metrics

 import logging
@@ -50,7 +52,7 @@ class TransactionQueue(object):

         self.transport_layer = transport_layer

-        self._clock = hs.get_clock()
+        self.clock = hs.get_clock()

         # Is a mapping from destinations -> deferreds. Used to keep track
         # of which destinations have transactions in flight and when they are
@@ -68,20 +70,30 @@ class TransactionQueue(object):
         # destination -> list of tuple(edu, deferred)
         self.pending_edus_by_dest = edus = {}

+        # Presence needs to be separate as we send single aggragate EDUs
+        self.pending_presence_by_dest = presence = {}
+        self.pending_edus_keyed_by_dest = edus_keyed = {}
+
         metrics.register_callback(
             "pending_pdus",
             lambda: sum(map(len, pdus.values())),
         )
         metrics.register_callback(
             "pending_edus",
-            lambda: sum(map(len, edus.values())),
+            lambda: (
+                sum(map(len, edus.values()))
+                + sum(map(len, presence.values()))
+                + sum(map(len, edus_keyed.values()))
+            ),
         )

         # destination -> list of tuple(failure, deferred)
         self.pending_failures_by_dest = {}

+        self.last_device_stream_id_by_dest = {}
+
         # HACK to get unique tx id
-        self._next_txn_id = int(self._clock.time_msec())
+        self._next_txn_id = int(self.clock.time_msec())

     def can_send_to(self, destination):
         """Can we send messages to the given server?
@@ -118,86 +130,68 @@ class TransactionQueue(object):
         if not destinations:
             return

-        deferreds = []
-
         for destination in destinations:
-            deferred = defer.Deferred()
             self.pending_pdus_by_dest.setdefault(destination, []).append(
-                (pdu, deferred, order)
+                (pdu, order)
             )

-            def chain(failure):
-                if not deferred.called:
-                    deferred.errback(failure)
+            preserve_context_over_fn(
+                self._attempt_new_transaction, destination
+            )

-            def log_failure(f):
-                logger.warn("Failed to send pdu to %s: %s", destination, f.value)
+    def enqueue_presence(self, destination, states):
+        self.pending_presence_by_dest.setdefault(destination, {}).update({
+            state.user_id: state for state in states
+        })

-            deferred.addErrback(log_failure)
+        preserve_context_over_fn(
+            self._attempt_new_transaction, destination
+        )

-            with PreserveLoggingContext():
-                self._attempt_new_transaction(destination).addErrback(chain)
-
-            deferreds.append(deferred)
-
     # NO inlineCallbacks
-    def enqueue_edu(self, edu):
+    def enqueue_edu(self, edu, key=None):
         destination = edu.destination

         if not self.can_send_to(destination):
             return

-        deferred = defer.Deferred()
-        self.pending_edus_by_dest.setdefault(destination, []).append(
-            (edu, deferred)
-        )
+        if key:
+            self.pending_edus_keyed_by_dest.setdefault(
+                destination, {}
+            )[(edu.edu_type, key)] = edu
+        else:
+            self.pending_edus_by_dest.setdefault(destination, []).append(edu)

-        def chain(failure):
-            if not deferred.called:
-                deferred.errback(failure)
+        preserve_context_over_fn(
+            self._attempt_new_transaction, destination
+        )

-        def log_failure(f):
-            logger.warn("Failed to send edu to %s: %s", destination, f.value)
-
-        deferred.addErrback(log_failure)
-
-        with PreserveLoggingContext():
-            self._attempt_new_transaction(destination).addErrback(chain)
-
-        return deferred
-
-    @defer.inlineCallbacks
     def enqueue_failure(self, failure, destination):
         if destination == self.server_name or destination == "localhost":
             return

-        deferred = defer.Deferred()
-
         if not self.can_send_to(destination):
             return

         self.pending_failures_by_dest.setdefault(
             destination, []
-        ).append(
-            (failure, deferred)
-        )
+        ).append(failure)

-        def chain(f):
-            if not deferred.called:
-                deferred.errback(f)
+        preserve_context_over_fn(
+            self._attempt_new_transaction, destination
+        )

-        def log_failure(f):
-            logger.warn("Failed to send failure to %s: %s", destination, f.value)
+    def enqueue_device_messages(self, destination):
+        if destination == self.server_name or destination == "localhost":
+            return

-        deferred.addErrback(log_failure)
+        if not self.can_send_to(destination):
+            return

-        with PreserveLoggingContext():
-            self._attempt_new_transaction(destination).addErrback(chain)
-
-        yield deferred
+        preserve_context_over_fn(
+            self._attempt_new_transaction, destination
+        )
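The new key parameter on enqueue_edu lets a later EDU of the same type overwrite an earlier one that is still queued, which suits state-like EDUs (typing notifications, presence) where only the latest value matters. A small sketch of that replace-by-key queue discipline, with illustrative names:

    pending_edus = {}        # destination -> ordered list (send every one)
    pending_edus_keyed = {}  # destination -> {(edu_type, key): edu} (send latest)

    def enqueue_edu(destination, edu_type, edu, key=None):
        if key is not None:
            # Same (type, key) again? Overwrite: only the newest copy is sent.
            pending_edus_keyed.setdefault(destination, {})[(edu_type, key)] = edu
        else:
            pending_edus.setdefault(destination, []).append(edu)

    enqueue_edu("remote.example", "m.typing", {"typing": True}, key="!room")
    enqueue_edu("remote.example", "m.typing", {"typing": False}, key="!room")
    assert pending_edus_keyed["remote.example"][("m.typing", "!room")] == {
        "typing": False
    }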
     @defer.inlineCallbacks
     @log_function
     def _attempt_new_transaction(self, destination):
         # list of (pending_pdu, deferred, order)
         if destination in self.pending_transactions:
@@ -211,55 +205,128 @@ class TransactionQueue(object):
             )
             return

-        pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
-        pending_edus = self.pending_edus_by_dest.pop(destination, [])
-        pending_failures = self.pending_failures_by_dest.pop(destination, [])
-
-        if pending_pdus:
-            logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
-                         destination, len(pending_pdus))
-
-        if not pending_pdus and not pending_edus and not pending_failures:
-            logger.debug("TX [%s] Nothing to send", destination)
-            return
-
         try:
             self.pending_transactions[destination] = 1

             yield run_on_reactor()

+            while True:
+                pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+                pending_edus = self.pending_edus_by_dest.pop(destination, [])
+                pending_presence = self.pending_presence_by_dest.pop(destination, {})
+                pending_failures = self.pending_failures_by_dest.pop(destination, [])
+
+                pending_edus.extend(
+                    self.pending_edus_keyed_by_dest.pop(destination, {}).values()
+                )
+
+                limiter = yield get_retry_limiter(
+                    destination,
+                    self.clock,
+                    self.store,
+                )
+
+                device_message_edus, device_stream_id = (
+                    yield self._get_new_device_messages(destination)
+                )
+
+                pending_edus.extend(device_message_edus)
+                if pending_presence:
+                    pending_edus.append(
+                        Edu(
+                            origin=self.server_name,
+                            destination=destination,
+                            edu_type="m.presence",
+                            content={
+                                "push": [
+                                    format_user_presence_state(
+                                        presence, self.clock.time_msec()
+                                    )
+                                    for presence in pending_presence.values()
+                                ]
+                            },
+                        )
+                    )
+
+                if pending_pdus:
+                    logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
+                                 destination, len(pending_pdus))
+
+                if not pending_pdus and not pending_edus and not pending_failures:
+                    logger.debug("TX [%s] Nothing to send", destination)
+                    self.last_device_stream_id_by_dest[destination] = (
+                        device_stream_id
+                    )
+                    return
+
+                success = yield self._send_new_transaction(
+                    destination, pending_pdus, pending_edus, pending_failures,
+                    device_stream_id,
+                    should_delete_from_device_stream=bool(device_message_edus),
+                    limiter=limiter,
+                )
+                if not success:
+                    break
+        except NotRetryingDestination:
+            logger.info(
+                "TX [%s] not ready for retry yet - "
+                "dropping transaction for now",
+                destination,
+            )
+        finally:
+            # We want to be *very* sure we delete this after we stop processing
+            self.pending_transactions.pop(destination, None)

+    @defer.inlineCallbacks
+    def _get_new_device_messages(self, destination):
+        last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
+        to_device_stream_id = self.store.get_to_device_stream_token()
+        contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
+            destination, last_device_stream_id, to_device_stream_id
+        )
+        edus = [
+            Edu(
+                origin=self.server_name,
+                destination=destination,
+                edu_type="m.direct_to_device",
+                content=content,
+            )
+            for content in contents
+        ]
+        defer.returnValue((edus, stream_id))
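_get_new_device_messages above drains the to-device stream for one destination: it remembers the last stream id sent per destination, asks the store for anything newer, and wraps each payload in an m.direct_to_device EDU; the caller only deletes the rows once the transaction succeeds. A schematic of that at-least-once hand-off, with a plain list standing in for the store:

    stream = []      # append-only list of (stream_id, destination, content)
    last_acked = {}  # destination -> highest stream_id confirmed sent

    def get_new_device_messages(destination):
        since = last_acked.get(destination, 0)
        pending = [(i, c) for (i, d, c) in stream
                   if d == destination and i > since]
        contents = [c for (i, c) in pending]
        new_stream_id = max([i for (i, c) in pending] or [since])
        return contents, new_stream_id

    def on_transaction_success(destination, stream_id):
        # Only advance (and prune server-side) once the remote accepted them.
        last_acked[destination] = stream_id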
    @measure_func("_send_new_transaction")
    @defer.inlineCallbacks
    def _send_new_transaction(self, destination, pending_pdus, pending_edus,
                              pending_failures, device_stream_id,
                              should_delete_from_device_stream, limiter):

        # Sort based on the order field
        pending_pdus.sort(key=lambda t: t[1])
        pdus = [x[0] for x in pending_pdus]
        edus = pending_edus
        failures = [x.get_dict() for x in pending_failures]

        success = True

        try:
            logger.debug("TX [%s] _attempt_new_transaction", destination)

            # Sort based on the order field
            pending_pdus.sort(key=lambda t: t[2])

            pdus = [x[0] for x in pending_pdus]
            edus = [x[0] for x in pending_edus]
            failures = [x[0].get_dict() for x in pending_failures]
            deferreds = [
                x[1]
                for x in pending_pdus + pending_edus + pending_failures
            ]

            txn_id = str(self._next_txn_id)

            limiter = yield get_retry_limiter(
                destination,
                self._clock,
                self.store,
            )

            logger.debug(
                "TX [%s] {%s} Attempting new transaction"
                " (pdus: %d, edus: %d, failures: %d)",
                destination, txn_id,
                len(pending_pdus),
                len(pending_edus),
                len(pending_failures)
                len(pdus),
                len(edus),
                len(failures)
            )

            logger.debug("TX [%s] Persisting transaction...", destination)

            transaction = Transaction.create_new(
                origin_server_ts=int(self._clock.time_msec()),
                origin_server_ts=int(self.clock.time_msec()),
                transaction_id=txn_id,
                origin=self.server_name,
                destination=destination,
@@ -278,9 +345,9 @@ class TransactionQueue(object):
                " (PDUs: %d, EDUs: %d, failures: %d)",
                destination, txn_id,
                transaction.transaction_id,
                len(pending_pdus),
                len(pending_edus),
                len(pending_failures),
                len(pdus),
                len(edus),
                len(failures),
            )

            with limiter:
@@ -290,7 +357,7 @@ class TransactionQueue(object):
                # keys work
                def json_data_cb():
                    data = transaction.get_dict()
                    now = int(self._clock.time_msec())
                    now = int(self.clock.time_msec())
                    if "pdus" in data:
                        for p in data["pdus"]:
                            if "age_ts" in p:
@@ -330,28 +397,19 @@ class TransactionQueue(object):

                logger.debug("TX [%s] Marked as delivered", destination)

                logger.debug("TX [%s] Yielding to callbacks...", destination)

                for deferred in deferreds:
                    if code == 200:
                        deferred.callback(None)
                    else:
                        deferred.errback(RuntimeError("Got status %d" % code))

                    # Ensures we don't continue until all callbacks on that
                    # deferred have fired
                    try:
                        yield deferred
                    except:
                        pass

                logger.debug("TX [%s] Yielded to callbacks", destination)
        except NotRetryingDestination:
            logger.info(
                "TX [%s] not ready for retry yet - "
                "dropping transaction for now",
                destination,
            )
            if code != 200:
                for p in pdus:
                    logger.info(
                        "Failed to send event %s to %s", p.event_id, destination
                    )
                success = False
            else:
                # Remove the acknowledged device messages from the database
                if should_delete_from_device_stream:
                    yield self.store.delete_device_msgs_for_remote(
                        destination, device_stream_id
                    )
                self.last_device_stream_id_by_dest[destination] = device_stream_id
        except RuntimeError as e:
            # We capture this here as there is nothing that actually listens
            # for this function's deferred.
@@ -360,6 +418,11 @@ class TransactionQueue(object):
                destination,
                e,
            )

            success = False

            for p in pdus:
                logger.info("Failed to send event %s to %s", p.event_id, destination)
        except Exception as e:
            # We capture this here as there is nothing that actually listens
            # for this function's deferred.
@@ -369,13 +432,9 @@ class TransactionQueue(object):
                e,
            )

            for deferred in deferreds:
                if not deferred.called:
                    deferred.errback(e)
            success = False

        finally:
            # We want to be *very* sure we delete this after we stop processing
            self.pending_transactions.pop(destination, None)
            for p in pdus:
                logger.info("Failed to send event %s to %s", p.event_id, destination)

            # Check to see if there is anything else to send.
            self._attempt_new_transaction(destination)
        defer.returnValue(success)

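The acknowledgement rule implemented above, restated as a sketch: device messages are only deleted, and the per-destination stream position only advanced, once the remote server has accepted the transaction with a 200. send_transaction and current_max_id are hypothetical stand-ins here, and "queue" reuses the illustrative DeviceMessageQueue from earlier.

def send_and_ack(queue, destination, last_ids, send_transaction):
    msgs, stream_id = queue.get_new_msgs_for_remote(
        last_ids.get(destination, 0), queue.current_max_id()
    )
    code = send_transaction(destination, msgs)
    if code == 200:
        queue.delete_up_to(stream_id)       # safe: the remote has them
        last_ids[destination] = stream_id   # next batch starts here
        return True
    return False                            # keep the messages for retry
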
@@ -54,6 +54,28 @@ class TransportLayerClient(object):
            destination, path=path, args={"event_id": event_id},
        )

    @log_function
    def get_room_state_ids(self, destination, room_id, event_id):
        """ Requests all state for a given room from the given server at the
        given event. Returns the state's event_ids.

        Args:
            destination (str): The host name of the remote home server we want
                to get the state from.
            room_id (str): The room we want the state of.
            event_id (str): The event we want the context at.

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_room_state_ids dest=%s, room=%s",
                     destination, room_id)

        path = PREFIX + "/state_ids/%s/" % room_id
        return self.client.get_json(
            destination, path=path, args={"event_id": event_id},
        )

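For illustration, a thin caller of the helper above. The response keys assumed here (event IDs only, rather than full event JSON) follow the federation /state_ids API as specified; treat the exact key names as an assumption in this sketch.

from twisted.internet import defer

@defer.inlineCallbacks
def fetch_state_ids(transport, destination, room_id, event_id):
    # Expected response shape: {"pdu_ids": [...], "auth_chain_ids": [...]}
    result = yield transport.get_room_state_ids(destination, room_id, event_id)
    state_ids = result.get("pdu_ids", [])
    auth_chain_ids = result.get("auth_chain_ids", [])
    defer.returnValue((state_ids, auth_chain_ids))
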
    @log_function
    def get_event(self, destination, event_id, timeout=None):
        """ Requests the pdu with given id and origin from the given server.
@@ -160,6 +182,7 @@ class TransportLayerClient(object):
            path=path,
            args=args,
            retry_on_dns_fail=retry_on_dns_fail,
            timeout=10000,
        )

        defer.returnValue(content)
@@ -178,7 +201,8 @@ class TransportLayerClient(object):
        content = yield self.client.get_json(
            destination=destination,
            path=path,
            retry_on_dns_fail=True,
            retry_on_dns_fail=False,
            timeout=20000,
        )

        defer.returnValue(content)
@@ -222,6 +246,28 @@ class TransportLayerClient(object):

        defer.returnValue(response)

    @defer.inlineCallbacks
    @log_function
    def get_public_rooms(self, remote_server, limit, since_token,
                         search_filter=None):
        path = PREFIX + "/publicRooms"

        args = {}
        if limit:
            args["limit"] = [str(limit)]
        if since_token:
            args["since"] = [since_token]

        # TODO(erikj): Actually send the search_filter across federation.

        response = yield self.client.get_json(
            destination=remote_server,
            path=path,
            args=args,
        )

        defer.returnValue(response)

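A sketch of paginating the federation publicRooms API with the since token, assuming the response uses the client-API shape shown later in this diff ("chunk" plus "start"/"end" tokens):

from twisted.internet import defer

@defer.inlineCallbacks
def fetch_all_public_rooms(transport, remote_server, page_size=100):
    rooms = []
    since_token = None
    while True:
        resp = yield transport.get_public_rooms(
            remote_server, limit=page_size, since_token=since_token,
        )
        chunk = resp.get("chunk", [])
        rooms.extend(chunk)
        since_token = resp.get("end")
        if not chunk or not since_token:
            break
    defer.returnValue(rooms)
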
    @defer.inlineCallbacks
    @log_function
    def exchange_third_party_invite(self, destination, room_id, event_dict):
@@ -262,7 +308,7 @@ class TransportLayerClient(object):

    @defer.inlineCallbacks
    @log_function
    def query_client_keys(self, destination, query_content):
    def query_client_keys(self, destination, query_content, timeout):
        """Query the device keys for a list of user ids hosted on a remote
        server.

@@ -291,12 +337,13 @@ class TransportLayerClient(object):
            destination=destination,
            path=path,
            data=query_content,
            timeout=timeout,
        )
        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def claim_client_keys(self, destination, query_content):
    def claim_client_keys(self, destination, query_content, timeout):
        """Claim one-time keys for a list of devices hosted on a remote server.

        Request:
@@ -327,6 +374,7 @@ class TransportLayerClient(object):
            destination=destination,
            path=path,
            data=query_content,
            timeout=timeout,
        )
        defer.returnValue(content)

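The hunks above thread an explicit timeout through the key query and claim calls, so a slow remote bounds the whole request instead of hanging the caller. A sketch of the calling convention, where the default constant is a hypothetical value for illustration:

DEFAULT_KEY_QUERY_TIMEOUT_MS = 10 * 1000  # hypothetical budget

def query_keys_with_budget(transport, destination, query, timeout_ms=None):
    if timeout_ms is None:
        timeout_ms = DEFAULT_KEY_QUERY_TIMEOUT_MS
    # The timeout is in milliseconds and is passed straight through to the
    # underlying HTTP client call.
    return transport.query_client_keys(destination, query, timeout_ms)
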
@@ -18,12 +18,16 @@ from twisted.internet import defer
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import JsonResource
from synapse.http.servlet import (
    parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
)
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string

import functools
import logging
import simplejson as json
import re
import synapse


logger = logging.getLogger(__name__)
@@ -36,7 +40,7 @@ class TransportLayerServer(JsonResource):
        self.hs = hs
        self.clock = hs.get_clock()

        super(TransportLayerServer, self).__init__(hs)
        super(TransportLayerServer, self).__init__(hs, canonical_json=False)

        self.authenticator = Authenticator(hs)
        self.ratelimiter = FederationRateLimiter(
@@ -59,6 +63,16 @@ class TransportLayerServer(JsonResource):
        )


class AuthenticationError(SynapseError):
    """There was a problem authenticating the request"""
    pass


class NoAuthenticationError(AuthenticationError):
    """The request had no authentication information"""
    pass

class Authenticator(object):
    def __init__(self, hs):
        self.keyring = hs.get_keyring()
@@ -66,7 +80,7 @@ class Authenticator(object):

    # A method just so we can pass 'self' as the authenticator to the Servlets
    @defer.inlineCallbacks
    def authenticate_request(self, request):
    def authenticate_request(self, request, content):
        json_request = {
            "method": request.method,
            "uri": request.uri,
@@ -74,17 +88,10 @@ class Authenticator(object):
            "signatures": {},
        }

        content = None
        origin = None
        if content is not None:
            json_request["content"] = content

        if request.method in ["PUT", "POST"]:
            # TODO: Handle other method types? other content types?
            try:
                content_bytes = request.content.read()
                content = json.loads(content_bytes)
                json_request["content"] = content
            except:
                raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)
        origin = None

        def parse_auth_header(header_str):
            try:
@@ -102,14 +109,14 @@ class Authenticator(object):
                sig = strip_quotes(param_dict["sig"])
                return (origin, key, sig)
            except:
                raise SynapseError(
                raise AuthenticationError(
                    400, "Malformed Authorization header", Codes.UNAUTHORIZED
                )

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

        if not auth_headers:
            raise SynapseError(
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

@@ -120,7 +127,7 @@ class Authenticator(object):
            json_request["signatures"].setdefault(origin, {})[key] = sig

        if not json_request["signatures"]:
            raise SynapseError(
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

@@ -129,38 +136,59 @@ class Authenticator(object):
        logger.info("Request from %s", origin)
        request.authenticated_entity = origin

        defer.returnValue((origin, content))
        defer.returnValue(origin)

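A standalone sketch of the header format that parse_auth_header() above deals with. Federation requests carry an Authorization header of the form "X-Matrix origin=remote.example,key="ed25519:key1",sig="<base64 sig>"" (scheme and parameter names per the Matrix federation spec); error handling is elided here.

def parse_x_matrix(header_str):
    scheme, _, params = header_str.partition(" ")
    assert scheme == "X-Matrix"
    param_dict = dict(kv.split("=", 1) for kv in params.split(","))

    def strip_quotes(value):
        return value[1:-1] if value.startswith('"') else value

    origin = strip_quotes(param_dict["origin"])
    key = strip_quotes(param_dict["key"])
    sig = strip_quotes(param_dict["sig"])
    return origin, key, sig

# parse_x_matrix('X-Matrix origin=a.example,key="ed25519:k1",sig="xyz"')
# -> ("a.example", "ed25519:k1", "xyz")
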
class BaseFederationServlet(object):
    def __init__(self, handler, authenticator, ratelimiter, server_name):
    REQUIRE_AUTH = True

    def __init__(self, handler, authenticator, ratelimiter, server_name,
                 room_list_handler):
        self.handler = handler
        self.authenticator = authenticator
        self.ratelimiter = ratelimiter
        self.room_list_handler = room_list_handler

    def _wrap(self, code):
    def _wrap(self, func):
        authenticator = self.authenticator
        ratelimiter = self.ratelimiter

        @defer.inlineCallbacks
        @functools.wraps(code)
        def new_code(request, *args, **kwargs):
        @functools.wraps(func)
        def new_func(request, *args, **kwargs):
            content = None
            if request.method in ["PUT", "POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)

            try:
                (origin, content) = yield authenticator.authenticate_request(request)
                with ratelimiter.ratelimit(origin) as d:
                    yield d
                    response = yield code(
                        origin, content, request.args, *args, **kwargs
                    )
                origin = yield authenticator.authenticate_request(request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.exception("authenticate_request failed")
                    raise
            except:
                logger.exception("authenticate_request failed")
                raise

            if origin:
                with ratelimiter.ratelimit(origin) as d:
                    yield d
                    response = yield func(
                        origin, content, request.args, *args, **kwargs
                    )
            else:
                response = yield func(
                    origin, content, request.args, *args, **kwargs
                )

            defer.returnValue(response)

        # Extra logic that functools.wraps() doesn't finish
        new_code.__self__ = code.__self__
        new_func.__self__ = func.__self__

        return new_code
        return new_func

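With the rework above, a servlet that opts out of federation auth sets REQUIRE_AUTH = False and must tolerate origin being None. A hypothetical example subclass (PingServlet does not exist in the codebase):

from twisted.internet import defer

class PingServlet(BaseFederationServlet):
    PATH = "/ping"

    REQUIRE_AUTH = False  # NoAuthenticationError is swallowed, origin=None

    def on_GET(self, origin, content, query):
        # origin is None for unauthenticated callers, so don't rely on it.
        return defer.succeed((200, {"pong": True}))
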
    def register(self, server):
        pattern = re.compile("^" + PREFIX + self.PATH + "$")
@@ -174,7 +202,7 @@ class BaseFederationServlet(object):


class FederationSendServlet(BaseFederationServlet):
    PATH = "/send/([^/]*)/"
    PATH = "/send/(?P<transaction_id>[^/]*)/"

    def __init__(self, handler, server_name, **kwargs):
        super(FederationSendServlet, self).__init__(
@@ -249,7 +277,7 @@ class FederationPullServlet(BaseFederationServlet):


class FederationEventServlet(BaseFederationServlet):
    PATH = "/event/([^/]*)/"
    PATH = "/event/(?P<event_id>[^/]*)/"

    # This is when someone asks for a data item for a given server data_id pair.
    def on_GET(self, origin, content, query, event_id):
@@ -257,7 +285,7 @@ class FederationEventServlet(BaseFederationServlet):


class FederationStateServlet(BaseFederationServlet):
    PATH = "/state/([^/]*)/"
    PATH = "/state/(?P<context>[^/]*)/"

    # This is when someone asks for all data for a given context.
    def on_GET(self, origin, content, query, context):
@@ -268,8 +296,19 @@ class FederationStateServlet(BaseFederationServlet):
        )


class FederationStateIdsServlet(BaseFederationServlet):
    PATH = "/state_ids/(?P<room_id>[^/]*)/"

    def on_GET(self, origin, content, query, room_id):
        return self.handler.on_state_ids_request(
            origin,
            room_id,
            query.get("event_id", [None])[0],
        )


class FederationBackfillServlet(BaseFederationServlet):
    PATH = "/backfill/([^/]*)/"
    PATH = "/backfill/(?P<context>[^/]*)/"

    def on_GET(self, origin, content, query, context):
        versions = query["v"]
@@ -284,7 +323,7 @@ class FederationBackfillServlet(BaseFederationServlet):


class FederationQueryServlet(BaseFederationServlet):
    PATH = "/query/([^/]*)"
    PATH = "/query/(?P<query_type>[^/]*)"

    # This is when we receive a server-server Query
    def on_GET(self, origin, content, query, query_type):
@@ -295,7 +334,7 @@ class FederationQueryServlet(BaseFederationServlet):


class FederationMakeJoinServlet(BaseFederationServlet):
    PATH = "/make_join/([^/]*)/([^/]*)"
    PATH = "/make_join/(?P<context>[^/]*)/(?P<user_id>[^/]*)"

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query, context, user_id):
@@ -304,7 +343,7 @@ class FederationMakeJoinServlet(BaseFederationServlet):


class FederationMakeLeaveServlet(BaseFederationServlet):
    PATH = "/make_leave/([^/]*)/([^/]*)"
    PATH = "/make_leave/(?P<context>[^/]*)/(?P<user_id>[^/]*)"

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query, context, user_id):
@@ -313,7 +352,7 @@ class FederationMakeLeaveServlet(BaseFederationServlet):


class FederationSendLeaveServlet(BaseFederationServlet):
    PATH = "/send_leave/([^/]*)/([^/]*)"
    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<txid>[^/]*)"

    @defer.inlineCallbacks
    def on_PUT(self, origin, content, query, room_id, txid):
@@ -322,14 +361,14 @@ class FederationSendLeaveServlet(BaseFederationServlet):


class FederationEventAuthServlet(BaseFederationServlet):
    PATH = "/event_auth/([^/]*)/([^/]*)"
    PATH = "/event_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    def on_GET(self, origin, content, query, context, event_id):
        return self.handler.on_event_auth(origin, context, event_id)


class FederationSendJoinServlet(BaseFederationServlet):
    PATH = "/send_join/([^/]*)/([^/]*)"
    PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    @defer.inlineCallbacks
    def on_PUT(self, origin, content, query, context, event_id):
@@ -340,7 +379,7 @@ class FederationSendJoinServlet(BaseFederationServlet):


class FederationInviteServlet(BaseFederationServlet):
    PATH = "/invite/([^/]*)/([^/]*)"
    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    @defer.inlineCallbacks
    def on_PUT(self, origin, content, query, context, event_id):
@@ -351,7 +390,7 @@ class FederationInviteServlet(BaseFederationServlet):


class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
    PATH = "/exchange_third_party_invite/([^/]*)"
    PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"

    @defer.inlineCallbacks
    def on_PUT(self, origin, content, query, room_id):
@@ -364,10 +403,8 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
class FederationClientKeysQueryServlet(BaseFederationServlet):
    PATH = "/user/keys/query"

    @defer.inlineCallbacks
    def on_POST(self, origin, content, query):
        response = yield self.handler.on_query_client_keys(origin, content)
        defer.returnValue((200, response))
        return self.handler.on_query_client_keys(origin, content)


class FederationClientKeysClaimServlet(BaseFederationServlet):
@@ -380,12 +417,12 @@ class FederationClientKeysClaimServlet(BaseFederationServlet):


class FederationQueryAuthServlet(BaseFederationServlet):
    PATH = "/query_auth/([^/]*)/([^/]*)"
    PATH = "/query_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    @defer.inlineCallbacks
    def on_POST(self, origin, content, query, context, event_id):
        new_content = yield self.handler.on_query_auth_request(
            origin, content, event_id
            origin, content, context, event_id
        )

        defer.returnValue((200, new_content))
@@ -393,7 +430,7 @@ class FederationQueryAuthServlet(BaseFederationServlet):

class FederationGetMissingEventsServlet(BaseFederationServlet):
    # TODO(paul): Why does this path alone end with "/?" optional?
    PATH = "/get_missing_events/([^/]*)/?"
    PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"

    @defer.inlineCallbacks
    def on_POST(self, origin, content, query, room_id):

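Why the PATH strings above gain (?P<name>...) groups: register() compiles PATH into a regex, and named groups let the dispatcher pass captures to on_GET/on_PUT as keyword arguments instead of relying on positional order. A minimal illustration of the mechanism (not the actual dispatcher):

import re

pattern = re.compile("^/send/(?P<transaction_id>[^/]*)/$")

def dispatch(path, handler):
    match = pattern.match(path)
    if match:
        # groupdict() maps named groups to values, e.g. transaction_id="1234"
        return handler(**match.groupdict())

# dispatch("/send/1234/", lambda transaction_id: transaction_id) -> "1234"
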
@@ -417,25 +454,128 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
class On3pidBindServlet(BaseFederationServlet):
    PATH = "/3pid/onbind"

    REQUIRE_AUTH = False

    @defer.inlineCallbacks
    def on_POST(self, request):
        content_bytes = request.content.read()
        content = json.loads(content_bytes)
    def on_POST(self, origin, content, query):
        if "invites" in content:
            last_exception = None
            for invite in content["invites"]:
                try:
                    yield self.handler.exchange_third_party_invite(invite)
                    if "signed" not in invite or "token" not in invite["signed"]:
                        message = ("Rejecting received notification of third-"
                                   "party invite without signed: %s" % (invite,))
                        logger.info(message)
                        raise SynapseError(400, message)
                    yield self.handler.exchange_third_party_invite(
                        invite["sender"],
                        invite["mxid"],
                        invite["room_id"],
                        invite["signed"],
                    )
                except Exception as e:
                    last_exception = e
            if last_exception:
                raise last_exception
        defer.returnValue((200, {}))

    # Avoid doing remote HS authorization checks which are done by default by
    # BaseFederationServlet.
    def _wrap(self, code):
        return code

class OpenIdUserInfo(BaseFederationServlet):
    """
    Exchange a bearer token for information about a user.

    The response format should be compatible with:
        http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse

    GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "sub": "@userpart:example.org",
    }
    """

    PATH = "/openid/userinfo"

    REQUIRE_AUTH = False

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query):
        token = query.get("access_token", [None])[0]
        if token is None:
            defer.returnValue((401, {
                "errcode": "M_MISSING_TOKEN", "error": "Access Token required"
            }))
            return

        user_id = yield self.handler.on_openid_userinfo(token)

        if user_id is None:
            defer.returnValue((401, {
                "errcode": "M_UNKNOWN_TOKEN",
                "error": "Access Token unknown or expired"
            }))

        defer.returnValue((200, {"sub": user_id}))

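An illustrative consumer of the endpoint documented above: a third-party service that received an OpenID token from a Matrix client asks the user's homeserver who the token belongs to. The URL assumes the federation prefix /_matrix/federation/v1; this is a sketch, not a hardened client (no error handling, no URL escaping).

import json
import urllib2  # Python 2, matching the codebase

def lookup_openid_user(server, access_token):
    url = "https://%s/_matrix/federation/v1/openid/userinfo?access_token=%s" % (
        server, access_token,
    )
    resp = json.load(urllib2.urlopen(url))
    return resp["sub"]  # e.g. "@userpart:example.org"
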
class PublicRoomList(BaseFederationServlet):
    """
    Fetch the public room list for this server.

    This API returns information in the same format as /publicRooms on the
    client API, but will only ever include local public rooms and hence is
    intended for consumption by other home servers.

    GET /publicRooms HTTP/1.1

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "chunk": [
            {
                "aliases": [
                    "#test:localhost"
                ],
                "guest_can_join": false,
                "name": "test room",
                "num_joined_members": 3,
                "room_id": "!whkydVegtvatLfXmPN:localhost",
                "world_readable": false
            }
        ],
        "end": "END",
        "start": "START"
    }
    """

    PATH = "/publicRooms"

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query):
        limit = parse_integer_from_args(query, "limit", 0)
        since_token = parse_string_from_args(query, "since", None)
        data = yield self.room_list_handler.get_local_public_room_list(
            limit, since_token
        )
        defer.returnValue((200, data))


class FederationVersionServlet(BaseFederationServlet):
    PATH = "/version"

    REQUIRE_AUTH = False

    def on_GET(self, origin, content, query):
        return defer.succeed((200, {
            "server": {
                "name": "Synapse",
                "version": get_version_string(synapse)
            },
        }))


SERVLET_CLASSES = (
@@ -443,6 +583,7 @@ SERVLET_CLASSES = (
    FederationPullServlet,
    FederationEventServlet,
    FederationStateServlet,
    FederationStateIdsServlet,
    FederationBackfillServlet,
    FederationQueryServlet,
    FederationMakeJoinServlet,
@@ -458,6 +599,9 @@ SERVLET_CLASSES = (
    FederationClientKeysClaimServlet,
    FederationThirdPartyInviteExchangeServlet,
    On3pidBindServlet,
    OpenIdUserInfo,
    PublicRoomList,
    FederationVersionServlet,
)


@@ -468,4 +612,5 @@ def register_servlets(hs, resource, authenticator, ratelimiter):
        authenticator=authenticator,
        ratelimiter=ratelimiter,
        server_name=hs.hostname,
        room_list_handler=hs.get_room_list_handler(),
    ).register(resource)

@@ -13,23 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.appservice.scheduler import AppServiceScheduler
from synapse.appservice.api import ApplicationServiceApi
from .register import RegistrationHandler
from .room import (
    RoomCreationHandler, RoomMemberHandler, RoomListHandler, RoomContextHandler,
    RoomCreationHandler, RoomContextHandler,
)
from .room_member import RoomMemberHandler
from .message import MessageHandler
from .events import EventStreamHandler, EventHandler
from .federation import FederationHandler
from .profile import ProfileHandler
from .presence import PresenceHandler
from .directory import DirectoryHandler
from .typing import TypingNotificationHandler
from .admin import AdminHandler
from .appservice import ApplicationServicesHandler
from .sync import SyncHandler
from .auth import AuthHandler
from .identity import IdentityHandler
from .receipts import ReceiptsHandler
from .search import SearchHandler
@@ -37,10 +30,21 @@ from .search import SearchHandler

class Handlers(object):

    """ A collection of all the event handlers.
    """ Deprecated. A collection of handlers.

    There's no need to lazily create these; we'll just make them all eagerly
    at construction time.
    At some point most of the classes whose names ended in "Handler" were
    accessed through this class.

    However, this made it painful to unit test the handlers and to run cut-
    down versions of synapse that only use specific handlers, because using
    a single handler required creating all of the handlers. So some of the
    handlers have been lifted out of the Handlers object and are now accessed
    directly through the homeserver object itself.

    Any new handlers should follow the new pattern of being accessed through
    the homeserver object and should not be added to the Handlers object.

    The remaining handlers should be moved out of the handlers object.
    """

    def __init__(self, hs):
@@ -48,26 +52,11 @@ class Handlers(object):
        self.message_handler = MessageHandler(hs)
        self.room_creation_handler = RoomCreationHandler(hs)
        self.room_member_handler = RoomMemberHandler(hs)
        self.event_stream_handler = EventStreamHandler(hs)
        self.event_handler = EventHandler(hs)
        self.federation_handler = FederationHandler(hs)
        self.profile_handler = ProfileHandler(hs)
        self.presence_handler = PresenceHandler(hs)
        self.room_list_handler = RoomListHandler(hs)
        self.directory_handler = DirectoryHandler(hs)
        self.typing_notification_handler = TypingNotificationHandler(hs)
        self.admin_handler = AdminHandler(hs)
        self.receipts_handler = ReceiptsHandler(hs)
        asapi = ApplicationServiceApi(hs)
        self.appservice_handler = ApplicationServicesHandler(
            hs, asapi, AppServiceScheduler(
                clock=hs.get_clock(),
                store=hs.get_datastore(),
                as_api=asapi
            )
        )
        self.sync_handler = SyncHandler(hs)
        self.auth_handler = AuthHandler(hs)
        self.identity_handler = IdentityHandler(hs)
        self.search_handler = SearchHandler(hs)
        self.room_context_handler = RoomContextHandler(hs)

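A sketch of the new pattern the deprecation notice above describes: handlers hang off the homeserver object behind lazy, cached getters, so tests and cut-down deployments only construct what they use. get_device_handler appears later in this diff; the rest of this snippet is hypothetical scaffolding for illustration.

class DeviceHandler(object):  # stand-in for a real handler class
    def __init__(self, hs):
        self.hs = hs

class HomeServer(object):
    def __init__(self):
        self._device_handler = None

    def get_device_handler(self):
        # Built on first use, then cached; nothing else is constructed.
        if self._device_handler is None:
            self._device_handler = DeviceHandler(self)
        return self._device_handler
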
@@ -13,17 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.errors import LimitExceededError, SynapseError, AuthError
from synapse.crypto.event_signing import add_hashes_and_signatures
import synapse.types
from synapse.api.constants import Membership, EventTypes
from synapse.types import UserID, RoomAlias
from synapse.push.action_generator import ActionGenerator

from synapse.util.logcontext import PreserveLoggingContext

import logging
from synapse.api.errors import LimitExceededError
from synapse.types import UserID


logger = logging.getLogger(__name__)
@@ -33,11 +30,16 @@ class BaseHandler(object):
    """
    Common base class for the event handlers.

    :type store: synapse.storage.events.StateStore
    :type state_handler: synapse.state.StateHandler
    Attributes:
        store (synapse.storage.DataStore):
        state_handler (synapse.state.StateHandler):
    """

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer):
        """
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()
        self.notifier = hs.get_notifier()
@@ -47,94 +49,24 @@ class BaseHandler(object):
        self.clock = hs.get_clock()
        self.hs = hs

        self.signing_key = hs.config.signing_key[0]
        self.server_name = hs.hostname

        self.event_builder_factory = hs.get_event_builder_factory()

    @defer.inlineCallbacks
    def _filter_events_for_clients(self, user_tuples, events, event_id_to_state):
        """ Returns dict of user_id -> list of events that user is allowed to
        see.
        """
        forgotten = yield defer.gatherResults([
            self.store.who_forgot_in_room(
                room_id,
            )
            for room_id in frozenset(e.room_id for e in events)
        ], consumeErrors=True)

        # Set of membership event_ids that have been forgotten
        event_id_forgotten = frozenset(
            row["event_id"] for rows in forgotten for row in rows
        )

        def allowed(event, user_id, is_peeking):
            state = event_id_to_state[event.event_id]

            visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
            if visibility_event:
                visibility = visibility_event.content.get("history_visibility", "shared")
            else:
                visibility = "shared"

            if visibility == "world_readable":
                return True

            if is_peeking:
                return False

            membership_event = state.get((EventTypes.Member, user_id), None)
            if membership_event:
                if membership_event.event_id in event_id_forgotten:
                    membership = None
                else:
                    membership = membership_event.membership
            else:
                membership = None

            if membership == Membership.JOIN:
                return True

            if event.type == EventTypes.RoomHistoryVisibility:
                return not is_peeking

            if visibility == "shared":
                return True
            elif visibility == "joined":
                return membership == Membership.JOIN
            elif visibility == "invited":
                return membership == Membership.INVITE

            return True

        defer.returnValue({
            user_id: [
                event
                for event in events
                if allowed(event, user_id, is_peeking)
            ]
            for user_id, is_peeking in user_tuples
        })

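The decision table implemented by allowed() above, restated as a pure function over plain values for easy testing. Here visibility is the room's m.room.history_visibility setting and membership is the user's membership at the event (or None); the special case for history-visibility events themselves is elided, and string literals stand in for the Membership constants.

def may_see(visibility, membership, is_peeking):
    if visibility == "world_readable":
        return True
    if is_peeking:
        return False
    if membership == "join":
        return True
    if visibility == "joined":
        return False          # joined users already matched above
    if visibility == "invited":
        return membership == "invite"
    return True               # "shared" (and anything unrecognised)

assert may_see("world_readable", None, True)
assert not may_see("shared", None, True)       # peeking users see nothing
assert may_see("invited", "invite", False)
assert not may_see("joined", "invite", False)
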
    @defer.inlineCallbacks
    def _filter_events_for_client(self, user_id, events, is_peeking=False):
        # Assumes that user has at some point joined the room if not is_guest.
        types = (
            (EventTypes.RoomHistoryVisibility, ""),
            (EventTypes.Member, user_id),
        )
        event_id_to_state = yield self.store.get_state_for_events(
            frozenset(e.event_id for e in events),
            types=types
        )
        res = yield self._filter_events_for_clients(
            [(user_id, is_peeking)], events, event_id_to_state
        )
        defer.returnValue(res.get(user_id, []))

    def ratelimit(self, user_id):
    def ratelimit(self, requester):
        time_now = self.clock.time()
        user_id = requester.user.to_string()

        # The AS user itself is never rate limited.
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service is not None:
            return  # do not ratelimit app service senders

        # Disable rate limiting of users belonging to any AS that is configured
        # not to be rate limited in its registration file (rate_limited: true|false).
        if requester.app_service and not requester.app_service.is_rate_limited():
            return

        allowed, time_allowed = self.ratelimiter.send_message(
            user_id, time_now,
            msg_rate_hz=self.hs.config.rc_messages_per_second,
@@ -146,167 +78,20 @@ class BaseHandler(object):
        )

    @defer.inlineCallbacks
    def _create_new_client_event(self, builder):
        latest_ret = yield self.store.get_latest_events_in_room(
            builder.room_id,
        )

        if latest_ret:
            depth = max([d for _, _, d in latest_ret]) + 1
        else:
            depth = 1

        prev_events = [(e, h) for e, h, _ in latest_ret]

        builder.prev_events = prev_events
        builder.depth = depth

        state_handler = self.state_handler

        context = yield state_handler.compute_event_context(builder)

        if builder.is_state():
            builder.prev_state = yield self.store.add_event_hashes(
                context.prev_state_events
            )

        yield self.auth.add_auth_events(builder, context)

        add_hashes_and_signatures(
            builder, self.server_name, self.signing_key
        )

        event = builder.build()

        logger.debug(
            "Created event %s with current state: %s",
            event.event_id, context.current_state,
        )

        defer.returnValue(
            (event, context,)
        )

    @defer.inlineCallbacks
    def handle_new_client_event(self, event, context, extra_users=[]):
        # We now need to go and hit out to wherever we need to hit out to.

        self.auth.check(event, auth_events=context.current_state)

        yield self.maybe_kick_guest_users(event, context.current_state.values())

        if event.type == EventTypes.CanonicalAlias:
            # Check the alias is actually valid (at this time at least)
            room_alias_str = event.content.get("alias", None)
            if room_alias_str:
                room_alias = RoomAlias.from_string(room_alias_str)
                directory_handler = self.hs.get_handlers().directory_handler
                mapping = yield directory_handler.get_association(room_alias)

                if mapping["room_id"] != event.room_id:
                    raise SynapseError(
                        400,
                        "Room alias %s does not point to the room" % (
                            room_alias_str,
                        )
                    )

        federation_handler = self.hs.get_handlers().federation_handler

        if event.type == EventTypes.Member:
            if event.content["membership"] == Membership.INVITE:
                event.unsigned["invite_room_state"] = [
                    {
                        "type": e.type,
                        "state_key": e.state_key,
                        "content": e.content,
                        "sender": e.sender,
                    }
                    for k, e in context.current_state.items()
                    if e.type in (
                        EventTypes.JoinRules,
                        EventTypes.CanonicalAlias,
                        EventTypes.RoomAvatar,
                        EventTypes.Name,
                    )
                ]

                invitee = UserID.from_string(event.state_key)
                if not self.hs.is_mine(invitee):
                    # TODO: Can we add signature from remote server in a nicer
                    # way? If we have been invited by a remote server, we need
                    # to get them to sign the event.

                    returned_invite = yield federation_handler.send_invite(
                        invitee.domain,
                        event,
                    )

                    event.unsigned.pop("room_state", None)

                    # TODO: Make sure the signatures actually are correct.
                    event.signatures.update(
                        returned_invite.signatures
                    )

        if event.type == EventTypes.Redaction:
            if self.auth.check_redaction(event, auth_events=context.current_state):
                original_event = yield self.store.get_event(
                    event.redacts,
                    check_redacted=False,
                    get_prev_content=False,
                    allow_rejected=False,
                    allow_none=False
                )
                if event.user_id != original_event.user_id:
                    raise AuthError(
                        403,
                        "You don't have permission to redact events"
                    )

        action_generator = ActionGenerator(self.hs)
        yield action_generator.handle_push_actions_for_event(
            event, context, self
        )

        (event_stream_id, max_stream_id) = yield self.store.persist_event(
            event, context=context
        )

        destinations = set()
        for k, s in context.current_state.items():
            try:
                if k[0] == EventTypes.Member:
                    if s.content["membership"] == Membership.JOIN:
                        destinations.add(
                            UserID.from_string(s.state_key).domain
                        )
            except SynapseError:
                logger.warn(
                    "Failed to get destination from event %s", s.event_id
                )

        with PreserveLoggingContext():
            # Don't block waiting on waking up all the listeners.
            self.notifier.on_new_room_event(
                event, event_stream_id, max_stream_id,
                extra_users=extra_users
            )

        # If invite, remove room_state from unsigned before sending.
        event.unsigned.pop("invite_room_state", None)

        federation_handler.handle_new_event(
            event, destinations=destinations,
        )

    @defer.inlineCallbacks
    def maybe_kick_guest_users(self, event, current_state):
    def maybe_kick_guest_users(self, event, context=None):
        # Technically this function invalidates current_state by changing it.
        # Hopefully this isn't that important to the caller.
        if event.type == EventTypes.GuestAccess:
            guest_access = event.content.get("guest_access", "forbidden")
            if guest_access != "can_join":
                if context:
                    current_state = yield self.store.get_events(
                        context.current_state_ids.values()
                    )
                    current_state = current_state.values()
                else:
                    current_state = yield self.store.get_current_state(event.room_id)
                logger.info("maybe_kick_guest_users %r", current_state)
                yield self.kick_guest_users(current_state)

    @defer.inlineCallbacks
@@ -316,7 +101,8 @@ class BaseHandler(object):
            if member_event.type != EventTypes.Member:
                continue

            if not self.hs.is_mine(UserID.from_string(member_event.state_key)):
            target_user = UserID.from_string(member_event.state_key)
            if not self.hs.is_mine(target_user):
                continue

            if member_event.content["membership"] not in {
@@ -338,18 +124,14 @@ class BaseHandler(object):
                # and having homeservers have their own users leave keeps more
                # of that decision-making and control local to the guest-having
                # homeserver.
                message_handler = self.hs.get_handlers().message_handler
                yield message_handler.create_and_send_event(
                    {
                        "type": EventTypes.Member,
                        "state_key": member_event.state_key,
                        "content": {
                            "membership": Membership.LEAVE,
                            "kind": "guest"
                        },
                        "room_id": member_event.room_id,
                        "sender": member_event.state_key
                    },
                requester = synapse.types.create_requester(
                    target_user, is_guest=True)
                handler = self.hs.get_handlers().room_member_handler
                yield handler.update_membership(
                    requester,
                    target_user,
                    member_event.room_id,
                    "leave",
                    ratelimit=False,
                )
            except Exception as e:

@@ -16,8 +16,8 @@
from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.appservice import ApplicationService
from synapse.types import UserID
from synapse.util.metrics import Measure
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred

import logging

@@ -35,47 +35,81 @@ def log_failure(failure):
    )


# NB: Purposefully not inheriting BaseHandler since that contains way too much
# setup code which this handler does not need or use. This makes testing a lot
# easier.
class ApplicationServicesHandler(object):

    def __init__(self, hs, appservice_api, appservice_scheduler):
    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.hs = hs
        self.appservice_api = appservice_api
        self.scheduler = appservice_scheduler
        self.is_mine_id = hs.is_mine_id
        self.appservice_api = hs.get_application_service_api()
        self.scheduler = hs.get_application_service_scheduler()
        self.started_scheduler = False
        self.clock = hs.get_clock()
        self.notify_appservices = hs.config.notify_appservices

        self.current_max = 0
        self.is_processing = False

    @defer.inlineCallbacks
    def notify_interested_services(self, event):
    def notify_interested_services(self, current_id):
        """Notifies (pushes) all application services interested in this event.

        Pushing is done asynchronously, so this method won't block for any
        prolonged length of time.

        Args:
            event(Event): The event to push out to interested services.
            current_id(int): The current maximum ID.
        """
        # Gather interested services
        services = yield self._get_services_for_event(event)
        if len(services) == 0:
            return  # no services need notifying
        services = self.store.get_app_services()
        if not services or not self.notify_appservices:
            return

        # Do we know this user exists? If not, poke the user query API for
        # all services which match that user regex. This needs to block as these
        # user queries need to be made BEFORE pushing the event.
        yield self._check_user_exists(event.sender)
        if event.type == EventTypes.Member:
            yield self._check_user_exists(event.state_key)
        self.current_max = max(self.current_max, current_id)
        if self.is_processing:
            return

        if not self.started_scheduler:
            self.scheduler.start().addErrback(log_failure)
            self.started_scheduler = True
        with Measure(self.clock, "notify_interested_services"):
            self.is_processing = True
            try:
                upper_bound = self.current_max
                limit = 100
                while True:
                    upper_bound, events = yield self.store.get_new_events_for_appservice(
                        upper_bound, limit
                    )

        # Fork off pushes to these services
        for service in services:
            self.scheduler.submit_event_for_as(service, event)
                    if not events:
                        break

                    for event in events:
                        # Gather interested services
                        services = yield self._get_services_for_event(event)
                        if len(services) == 0:
                            continue  # no services need notifying

                        # Do we know this user exists? If not, poke the user
                        # query API for all services which match that user regex.
                        # This needs to block as these user queries need to be
                        # made BEFORE pushing the event.
                        yield self._check_user_exists(event.sender)
                        if event.type == EventTypes.Member:
                            yield self._check_user_exists(event.state_key)

                        if not self.started_scheduler:
                            self.scheduler.start().addErrback(log_failure)
                            self.started_scheduler = True

                        # Fork off pushes to these services
                        for service in services:
                            preserve_fn(self.scheduler.submit_event_for_as)(
                                service, event
                            )

                    yield self.store.set_appservice_last_pos(upper_bound)

                    if len(events) < limit:
                        break
            finally:
                self.is_processing = False

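The shape of the loop above, extracted: consume a stream in pages, persisting the last position processed so a restart resumes rather than replays. Here fetch(from_id, limit) and save_pos are stand-ins for get_new_events_for_appservice and set_appservice_last_pos.

def drain_stream(fetch, save_pos, start_pos, limit=100):
    upper_bound = start_pos
    while True:
        upper_bound, events = fetch(upper_bound, limit)
        if not events:
            break
        for event in events:
            pass  # ... notify interested services about each event ...
        save_pos(upper_bound)   # persist progress after each page
        if len(events) < limit:
            break               # caught up with the stream
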
    @defer.inlineCallbacks
    def query_user_exists(self, user_id):
@@ -108,11 +142,12 @@ class ApplicationServicesHandler(object):
            association can be found.
        """
        room_alias_str = room_alias.to_string()
        alias_query_services = yield self._get_services_for_event(
            event=None,
            restrict_to=ApplicationService.NS_ALIASES,
            alias_list=[room_alias_str]
        )
        services = self.store.get_app_services()
        alias_query_services = [
            s for s in services if (
                s.is_interested_in_alias(room_alias_str)
            )
        ]
        for alias_service in alias_query_services:
            is_known_alias = yield self.appservice_api.query_alias(
                alias_service, room_alias_str
@@ -125,52 +160,97 @@ class ApplicationServicesHandler(object):
        defer.returnValue(result)

    @defer.inlineCallbacks
    def _get_services_for_event(self, event, restrict_to="", alias_list=None):
    def query_3pe(self, kind, protocol, fields):
        services = yield self._get_services_for_3pn(protocol)

        results = yield preserve_context_over_deferred(defer.DeferredList([
            preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields)
            for service in services
        ], consumeErrors=True))

        ret = []
        for (success, result) in results:
            if success:
                ret.extend(result)

        defer.returnValue(ret)

    @defer.inlineCallbacks
    def get_3pe_protocols(self, only_protocol=None):
        services = self.store.get_app_services()
        protocols = {}

        # Collect up all the individual protocol responses out of the ASes
        for s in services:
            for p in s.protocols:
                if only_protocol is not None and p != only_protocol:
                    continue

                if p not in protocols:
                    protocols[p] = []

                info = yield self.appservice_api.get_3pe_protocol(s, p)

                if info is not None:
                    protocols[p].append(info)

        def _merge_instances(infos):
            if not infos:
                return {}

            # Merge the 'instances' lists of multiple results, but just take
            # the other fields from the first as they ought to be identical.
            # Copy the result so as not to corrupt the cached one.
            combined = dict(infos[0])
            combined["instances"] = list(combined["instances"])

            for info in infos[1:]:
                combined["instances"].extend(info["instances"])

            return combined

        for p in protocols.keys():
            protocols[p] = _merge_instances(protocols[p])

        defer.returnValue(protocols)

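An example of what _merge_instances() above does, with two application services both offering a hypothetical "irc" protocol:

infos = [
    {"user_fields": ["nick"], "instances": [{"network_id": "freenode"}]},
    {"user_fields": ["nick"], "instances": [{"network_id": "oftc"}]},
]
# Merging keeps the first result's fields and concatenates "instances":
# {"user_fields": ["nick"],
#  "instances": [{"network_id": "freenode"}, {"network_id": "oftc"}]}
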
    @defer.inlineCallbacks
    def _get_services_for_event(self, event):
        """Retrieve a list of application services interested in this event.

        Args:
            event(Event): The event to check. Can be None if alias_list is not.
            restrict_to(str): The namespace to restrict regex tests to.
            alias_list: A list of aliases to get services for. If None, this
                list is obtained from the database.
        Returns:
            list<ApplicationService>: A list of services interested in this
                event based on the service regex.
        """
        member_list = None
        if hasattr(event, "room_id"):
            # We need to know the aliases associated with this event.room_id,
            # if any.
            if not alias_list:
                alias_list = yield self.store.get_aliases_for_room(
                    event.room_id
                )
            # We need to know the members associated with this event.room_id,
            # if any.
            member_list = yield self.store.get_users_in_room(event.room_id)

        services = yield self.store.get_app_services()
        services = self.store.get_app_services()
        interested_list = [
            s for s in services if (
                s.is_interested(event, restrict_to, alias_list, member_list)
                yield s.is_interested(event, self.store)
            )
        ]
        defer.returnValue(interested_list)

    @defer.inlineCallbacks
    def _get_services_for_user(self, user_id):
        services = yield self.store.get_app_services()
        services = self.store.get_app_services()
        interested_list = [
            s for s in services if (
                s.is_interested_in_user(user_id)
            )
        ]
        defer.returnValue(interested_list)
        return defer.succeed(interested_list)

    def _get_services_for_3pn(self, protocol):
        services = self.store.get_app_services()
        interested_list = [
            s for s in services if s.is_interested_in_protocol(protocol)
        ]
        return defer.succeed(interested_list)

    @defer.inlineCallbacks
    def _is_unknown_user(self, user_id):
        user = UserID.from_string(user_id)
        if not self.hs.is_mine(user):
        if not self.is_mine_id(user_id):
            # we don't know if they are unknown or not since it isn't one of our
            # users. We can't poke ASes.
            defer.returnValue(False)
@@ -182,7 +262,7 @@ class ApplicationServicesHandler(object):
            return

        # user not found; could be the AS though, so check.
        services = yield self.store.get_app_services()
        services = self.store.get_app_services()
        service_list = [s for s in services if s.sender == user_id]
        defer.returnValue(len(service_list) == 0)

@@ -18,7 +18,7 @@ from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.constants import LoginType
from synapse.types import UserID
from synapse.api.errors import AuthError, LoginError, Codes
from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError
from synapse.util.async import run_on_reactor

from twisted.web.client import PartialDownloadError
@@ -35,8 +35,13 @@ logger = logging.getLogger(__name__)


class AuthHandler(BaseHandler):
    SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer):
        """
        super(AuthHandler, self).__init__(hs)
        self.checkers = {
            LoginType.PASSWORD: self._check_password_auth,
@@ -46,7 +51,18 @@ class AuthHandler(BaseHandler):
        }
        self.bcrypt_rounds = hs.config.bcrypt_rounds
        self.sessions = {}
        self.INVALID_TOKEN_HTTP_STATUS = 401

        account_handler = _AccountHandler(
            hs, check_user_exists=self.check_user_exists
        )

        self.password_providers = [
            module(config=config, account_handler=account_handler)
            for module, config in hs.config.password_providers
        ]

        self.hs = hs  # FIXME better possibility to access registrationHandler later?
        self.device_handler = hs.get_device_handler()

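A sketch of the module interface implied above: each configured password provider is constructed with its config and an account_handler, and is expected to expose a password check returning a Deferred. The exact surface here is an assumption for illustration, not a documented contract.

from twisted.internet import defer

class ExamplePasswordProvider(object):
    def __init__(self, config, account_handler):
        self.account_handler = account_handler
        self.users = config.get("users", {})  # hypothetical config shape

    def check_password(self, user_id, password):
        """Return a Deferred resolving to True if we vouch for this login."""
        return defer.succeed(self.users.get(user_id) == password)
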
    @defer.inlineCallbacks
    def check_auth(self, flows, clientdict, clientip):
@@ -66,15 +82,18 @@ class AuthHandler(BaseHandler):
                'auth' key: this method prompts for auth if none is sent.
            clientip (str): The IP address of the client.
        Returns:
            A tuple of (authed, dict, dict) where authed is true if the client
            has successfully completed an auth flow. If it is true, the first
            dict contains the authenticated credentials of each stage.
            A tuple of (authed, dict, dict, session_id) where authed is true if
            the client has successfully completed an auth flow. If it is true,
            the first dict contains the authenticated credentials of each stage.

            If authed is false, the first dictionary is the server response to
            the login request and should be passed back to the client.

            In either case, the second dict contains the parameters for this
            request (which may have been given only in a previous call).

            session_id is the ID of this session, either passed in by the
            client or assigned by the call to check_auth.
        """

        authdict = None
@@ -103,7 +122,10 @@ class AuthHandler(BaseHandler):

        if not authdict:
            defer.returnValue(
                (False, self._auth_dict_for_flows(flows, session), clientdict)
                (
                    False, self._auth_dict_for_flows(flows, session),
                    clientdict, session['id']
                )
            )

        if 'creds' not in session:
@@ -111,23 +133,40 @@ class AuthHandler(BaseHandler):
        creds = session['creds']

        # check auth type currently being presented
        errordict = {}
        if 'type' in authdict:
            if authdict['type'] not in self.checkers:
            login_type = authdict['type']
            if login_type not in self.checkers:
                raise LoginError(400, "", Codes.UNRECOGNIZED)
            result = yield self.checkers[authdict['type']](authdict, clientip)
            if result:
                creds[authdict['type']] = result
                self._save_session(session)
            try:
                result = yield self.checkers[login_type](authdict, clientip)
                if result:
                    creds[login_type] = result
                    self._save_session(session)
            except LoginError, e:
                if login_type == LoginType.EMAIL_IDENTITY:
                    # riot used to have a bug where it would request a new
                    # validation token (thus sending a new email) each time it
                    # got a 401 with a 'flows' field.
                    # (https://github.com/vector-im/vector-web/issues/2447).
                    #
                    # Grandfather in the old behaviour for now to avoid
                    # breaking old riot deployments.
                    raise e

                # this step failed. Merge the error dict into the response
                # so that the client can have another go.
                errordict = e.error_dict()

        for f in flows:
            if len(set(f) - set(creds.keys())) == 0:
                logger.info("Auth completed with creds: %r", creds)
                self._remove_session(session)
                defer.returnValue((True, creds, clientdict))
                defer.returnValue((True, creds, clientdict, session['id']))

        ret = self._auth_dict_for_flows(flows, session)
        ret['completed'] = creds.keys()
        defer.returnValue((False, ret, clientdict))
        ret.update(errordict)
        defer.returnValue((False, ret, clientdict, session['id']))

    @defer.inlineCallbacks
    def add_oob_auth(self, stagetype, authdict, clientip):
@@ -154,7 +193,51 @@ class AuthHandler(BaseHandler):
            defer.returnValue(True)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def get_session_id(self, clientdict):
        """
        Gets the session ID for a client given the client dictionary.

        Args:
            clientdict: The dictionary sent by the client in the request

        Returns:
            str|None: The string session ID the client sent. If the client did
                not send a session ID, returns None.
        """
        sid = None
        if clientdict and 'auth' in clientdict:
            authdict = clientdict['auth']
            if 'session' in authdict:
                sid = authdict['session']
        return sid

    def set_session_data(self, session_id, key, value):
        """
        Store a key-value pair into the sessions data associated with this
        request. This data is stored server-side and cannot be modified by
        the client.

        Args:
            session_id (string): The ID of this session as returned from check_auth
            key (string): The key to store the data under
            value (any): The data to store
        """
        sess = self._get_session_info(session_id)
        sess.setdefault('serverdict', {})[key] = value
        self._save_session(sess)

    def get_session_data(self, session_id, key, default=None):
        """
        Retrieve data stored with set_session_data

        Args:
            session_id (string): The ID of this session as returned from check_auth
            key (string): The key to retrieve the data under
            default (any): Value to return if the key has not been set
        """
        sess = self._get_session_info(session_id)
        return sess.setdefault('serverdict', {}).get(key, default)

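How a caller might use the session helpers above during a multi-stage UI-auth flow: stash server-side state under the session so later requests in the same flow can retrieve it. auth_handler is an AuthHandler instance; "registered_user_id" is an illustrative key, not one mandated by this diff.

session_id = auth_handler.get_session_id(clientdict)
if session_id:
    auth_handler.set_session_data(session_id, "registered_user_id", user_id)

# ... on a subsequent request carrying the same session id ...
prev_user = auth_handler.get_session_data(
    session_id, "registered_user_id", default=None
)
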
def _check_password_auth(self, authdict, _):
|
||||
if "user" not in authdict or "password" not in authdict:
|
||||
raise LoginError(400, "", Codes.MISSING_PARAM)
|
||||
@@ -164,9 +247,7 @@ class AuthHandler(BaseHandler):
|
||||
if not user_id.startswith('@'):
|
||||
user_id = UserID.create(user_id, self.hs.hostname).to_string()
|
||||
|
||||
user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
|
||||
self._check_password(user_id, password, password_hash)
|
||||
defer.returnValue(user_id)
|
||||
return self._check_password(user_id, password)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_recaptcha(self, authdict, clientip):
|
||||
@@ -202,8 +283,17 @@ class AuthHandler(BaseHandler):
|
||||
data = pde.response
|
||||
resp_body = simplejson.loads(data)
|
||||
|
||||
if 'success' in resp_body and resp_body['success']:
|
||||
defer.returnValue(True)
|
||||
if 'success' in resp_body:
|
||||
# Note that we do NOT check the hostname here: we explicitly
|
||||
# intend the CAPTCHA to be presented by whatever client the
|
||||
# user is using, we just care that they have completed a CAPTCHA.
|
||||
logger.info(
|
||||
"%s reCAPTCHA from hostname %s",
|
||||
"Successful" if resp_body['success'] else "Failed",
|
||||
resp_body.get('hostname')
|
||||
)
|
||||
if resp_body['success']:
|
||||
defer.returnValue(True)
|
||||
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
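For reference, the verification service replies with JSON along these lines; a sketch showing only the two fields the code above reads (values invented):

    # Hypothetical parsed response body from the reCAPTCHA verify endpoint:
    resp_body = {
        "success": True,            # whether the CAPTCHA was solved
        "hostname": "example.com",  # site that presented it; logged, not checked
    }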

     @defer.inlineCallbacks
@@ -270,116 +360,189 @@ class AuthHandler(BaseHandler):

         return self.sessions[session_id]

-    @defer.inlineCallbacks
-    def login_with_password(self, user_id, password):
+    def validate_password_login(self, user_id, password):
         """
         Authenticates the user with their username and password.

         Used only by the v1 login API.

         Args:
-            user_id (str): User ID
+            user_id (str): complete @user:id
             password (str): Password
         Returns:
-            A tuple of:
-                The user's ID.
-                The access token for the user's session.
-                The refresh token for the user's session.
+            defer.Deferred: (str) canonical user id
         Raises:
-            StoreError if there was a problem storing the token.
+            StoreError if there was a problem accessing the database
             LoginError if there was an authentication problem.
         """
-        user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
-        self._check_password(user_id, password, password_hash)
-
-        logger.info("Logging in user %s", user_id)
-        access_token = yield self.issue_access_token(user_id)
-        refresh_token = yield self.issue_refresh_token(user_id)
-        defer.returnValue((user_id, access_token, refresh_token))
+        return self._check_password(user_id, password)

     @defer.inlineCallbacks
-    def get_login_tuple_for_user_id(self, user_id):
+    def get_login_tuple_for_user_id(self, user_id, device_id=None,
+                                    initial_display_name=None):
         """
         Gets login tuple for the user with the given user ID.

         Creates a new access/refresh token for the user.

         The user is assumed to have been authenticated by some other
-        machanism (e.g. CAS)
+        machanism (e.g. CAS), and the user_id converted to the canonical case.
+
+        The device will be recorded in the table if it is not there already.

         Args:
-            user_id (str): User ID
+            user_id (str): canonical User ID
+            device_id (str|None): the device ID to associate with the tokens.
+                None to leave the tokens unassociated with a device (deprecated:
+                we should always have a device ID)
+            initial_display_name (str): display name to associate with the
+                device if it needs re-registering
         Returns:
             A tuple of:
-                The user's ID.
                 The access token for the user's session.
                 The refresh token for the user's session.
         Raises:
             StoreError if there was a problem storing the token.
             LoginError if there was an authentication problem.
         """
-        user_id, ignored = yield self._find_user_id_and_pwd_hash(user_id)
-
-        logger.info("Logging in user %s", user_id)
-        access_token = yield self.issue_access_token(user_id)
-        refresh_token = yield self.issue_refresh_token(user_id)
-        defer.returnValue((user_id, access_token, refresh_token))
+        logger.info("Logging in user %s on device %s", user_id, device_id)
+        access_token = yield self.issue_access_token(user_id, device_id)
+        refresh_token = yield self.issue_refresh_token(user_id, device_id)
+
+        # the device *should* have been registered before we got here; however,
+        # it's possible we raced against a DELETE operation. The thing we
+        # really don't want is active access_tokens without a record of the
+        # device, so we double-check it here.
+        if device_id is not None:
+            yield self.device_handler.check_device_registered(
+                user_id, device_id, initial_display_name
+            )
+
+        defer.returnValue((access_token, refresh_token))
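Taken together, the v1 login path is now expected to call these two methods in sequence; a minimal sketch, assuming an inlineCallbacks context (the device ID and display name are invented):

    # Hypothetical caller, e.g. a /login request handler:
    canonical_uid = yield auth_handler.validate_password_login(user_id, password)
    access_token, refresh_token = yield auth_handler.get_login_tuple_for_user_id(
        canonical_uid,
        device_id="HYPOTHETICAL1",          # invented device ID
        initial_display_name="My client",   # invented display name
    )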

     @defer.inlineCallbacks
-    def does_user_exist(self, user_id):
-        try:
-            yield self._find_user_id_and_pwd_hash(user_id)
-            defer.returnValue(True)
-        except LoginError:
-            defer.returnValue(False)
+    def check_user_exists(self, user_id):
+        """
+        Checks to see if a user with the given id exists. Will check case
+        insensitively, but return None if there are multiple inexact matches.
+
+        Args:
+            (str) user_id: complete @user:id
+
+        Returns:
+            defer.Deferred: (str) canonical_user_id, or None if zero or
+            multiple matches
+        """
+        res = yield self._find_user_id_and_pwd_hash(user_id)
+        if res is not None:
+            defer.returnValue(res[0])
+        defer.returnValue(None)

     @defer.inlineCallbacks
     def _find_user_id_and_pwd_hash(self, user_id):
         """Checks to see if a user with the given id exists. Will check case
-        insensitively, but will throw if there are multiple inexact matches.
+        insensitively, but will return None if there are multiple inexact
+        matches.
+
+        Returns:
+            tuple: A 2-tuple of `(canonical_user_id, password_hash)`
+            None: if there is not exactly one match
         """
         user_infos = yield self.store.get_users_by_id_case_insensitive(user_id)

+        result = None
         if not user_infos:
             logger.warn("Attempted to login as %s but they do not exist", user_id)
-            raise LoginError(403, "", errcode=Codes.FORBIDDEN)
-
-        if len(user_infos) > 1:
-            if user_id not in user_infos:
-                logger.warn(
-                    "Attempted to login as %s but it matches more than one user "
-                    "inexactly: %r",
-                    user_id, user_infos.keys()
-                )
-                raise LoginError(403, "", errcode=Codes.FORBIDDEN)
-
-            defer.returnValue((user_id, user_infos[user_id]))
+        elif len(user_infos) == 1:
+            # a single match (possibly not exact)
+            result = user_infos.popitem()
+        elif user_id in user_infos:
+            # multiple matches, but one is exact
+            result = (user_id, user_infos[user_id])
         else:
-            defer.returnValue(user_infos.popitem())
-
-    def _check_password(self, user_id, password, stored_hash):
-        """Checks that user_id has passed password, raises LoginError if not."""
-        if not self.validate_hash(password, stored_hash):
-            logger.warn("Failed password login for user %s", user_id)
-            raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+            # multiple matches, none of them exact
+            logger.warn(
+                "Attempted to login as %s but it matches more than one user "
+                "inexactly: %r",
+                user_id, user_infos.keys()
+            )
+        defer.returnValue(result)
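The three match outcomes can be illustrated with a plain sketch of the lookup table (all user IDs invented):

    # user_infos maps stored user_id -> password_hash; the query is matched
    # case-insensitively against it:
    #
    #   query "@alice:hs" vs {"@Alice:hs": h}                -> ("@Alice:hs", h)
    #   query "@bob:hs"   vs {"@bob:hs": h1, "@Bob:hs": h2}  -> ("@bob:hs", h1)
    #   query "@eve:hs"   vs {"@Eve:hs": h1, "@EVE:hs": h2}  -> None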

     @defer.inlineCallbacks
-    def issue_access_token(self, user_id):
+    def _check_password(self, user_id, password):
+        """Authenticate a user against the LDAP and local databases.
+
+        user_id is checked case insensitively against the local database, but
+        will throw if there are multiple inexact matches.
+
+        Args:
+            user_id (str): complete @user:id
+        Returns:
+            (str) the canonical_user_id
+        Raises:
+            LoginError if login fails
+        """
+        for provider in self.password_providers:
+            is_valid = yield provider.check_password(user_id, password)
+            if is_valid:
+                defer.returnValue(user_id)
+
+        canonical_user_id = yield self._check_local_password(user_id, password)
+
+        if canonical_user_id:
+            defer.returnValue(canonical_user_id)
+
+        # unknown username or invalid password. We raise a 403 here, but note
+        # that if we're doing user-interactive login, it turns all LoginErrors
+        # into a 401 anyway.
+        raise LoginError(
+            403, "Invalid password",
+            errcode=Codes.FORBIDDEN
+        )
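The only contract the loop above relies on is a check_password(user_id, password) method returning a Deferred that fires truthy on success. A minimal, invented provider object as a sketch:

    from twisted.internet import defer

    class ExamplePasswordProvider(object):
        """Invented provider: accepts one hard-coded credential."""

        def check_password(self, user_id, password):
            # Fire True to accept the login and skip the local password check.
            ok = (user_id == "@demo:example.com" and password == "demo")
            return defer.succeed(ok)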

+    @defer.inlineCallbacks
+    def _check_local_password(self, user_id, password):
+        """Authenticate a user against the local password database.
+
+        user_id is checked case insensitively, but will return None if there are
+        multiple inexact matches.
+
+        Args:
+            user_id (str): complete @user:id
+        Returns:
+            (str) the canonical_user_id, or None if unknown user / bad password
+        """
+        lookupres = yield self._find_user_id_and_pwd_hash(user_id)
+        if not lookupres:
+            defer.returnValue(None)
+        (user_id, password_hash) = lookupres
+        result = self.validate_hash(password, password_hash)
+        if not result:
+            logger.warn("Failed password login for user %s", user_id)
+            defer.returnValue(None)
+        defer.returnValue(user_id)
+
     @defer.inlineCallbacks
+    def issue_access_token(self, user_id, device_id=None):
         access_token = self.generate_access_token(user_id)
-        yield self.store.add_access_token_to_user(user_id, access_token)
+        yield self.store.add_access_token_to_user(user_id, access_token,
+                                                  device_id)
         defer.returnValue(access_token)

     @defer.inlineCallbacks
-    def issue_refresh_token(self, user_id):
+    def issue_refresh_token(self, user_id, device_id=None):
         refresh_token = self.generate_refresh_token(user_id)
-        yield self.store.add_refresh_token_to_user(user_id, refresh_token)
+        yield self.store.add_refresh_token_to_user(user_id, refresh_token,
+                                                   device_id)
         defer.returnValue(refresh_token)

-    def generate_access_token(self, user_id, extra_caveats=None):
+    def generate_access_token(self, user_id, extra_caveats=None,
+                              duration_in_ms=(60 * 60 * 1000)):
         extra_caveats = extra_caveats or []
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = access")
         now = self.hs.get_clock().time_msec()
-        expiry = now + (60 * 60 * 1000)
+        expiry = now + duration_in_ms
         macaroon.add_first_party_caveat("time < %d" % (expiry,))
         for caveat in extra_caveats:
             macaroon.add_first_party_caveat(caveat)
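A sketch of what such a token looks like at the pymacaroons level; the location, identifier, key and caveat values here are placeholders standing in for whatever _generate_base_macaroon and the server config actually supply:

    import pymacaroons

    macaroon = pymacaroons.Macaroon(
        location="example.com",        # placeholder for the server name
        identifier="key",
        key="macaroon-secret-key",     # placeholder for the configured secret
    )
    macaroon.add_first_party_caveat("user_id = @alice:example.com")
    macaroon.add_first_party_caveat("type = access")
    macaroon.add_first_party_caveat("time < 1470000000000")  # expiry in ms
    token = macaroon.serialize()       # this string is the access token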
@@ -395,22 +558,28 @@ class AuthHandler(BaseHandler):
         ))
         return m.serialize()

-    def generate_short_term_login_token(self, user_id):
+    def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = login")
         now = self.hs.get_clock().time_msec()
-        expiry = now + (2 * 60 * 1000)
+        expiry = now + duration_in_ms
         macaroon.add_first_party_caveat("time < %d" % (expiry,))
         return macaroon.serialize()

+    def generate_delete_pusher_token(self, user_id):
+        macaroon = self._generate_base_macaroon(user_id)
+        macaroon.add_first_party_caveat("type = delete_pusher")
+        return macaroon.serialize()
+
     def validate_short_term_login_token_and_get_user_id(self, login_token):
+        auth_api = self.hs.get_auth()
         try:
             macaroon = pymacaroons.Macaroon.deserialize(login_token)
-            auth_api = self.hs.get_auth()
-            auth_api.validate_macaroon(macaroon, "login", True)
-            return self.get_user_from_macaroon(macaroon)
-        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
-            raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN)
+            user_id = auth_api.get_user_id_from_macaroon(macaroon)
+            auth_api.validate_macaroon(macaroon, "login", True, user_id)
+            return user_id
+        except Exception:
+            raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)

     def _generate_base_macaroon(self, user_id):
         macaroon = pymacaroons.Macaroon(
@@ -421,27 +590,39 @@ class AuthHandler(BaseHandler):
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
         return macaroon

-    def get_user_from_macaroon(self, macaroon):
-        user_prefix = "user_id = "
-        for caveat in macaroon.caveats:
-            if caveat.caveat_id.startswith(user_prefix):
-                return caveat.caveat_id[len(user_prefix):]
-        raise AuthError(
-            self.INVALID_TOKEN_HTTP_STATUS, "No user_id found in token",
-            errcode=Codes.UNKNOWN_TOKEN
-        )
-
     @defer.inlineCallbacks
-    def set_password(self, user_id, newpassword):
+    def set_password(self, user_id, newpassword, requester=None):
         password_hash = self.hash(newpassword)

-        yield self.store.user_set_password_hash(user_id, password_hash)
-        yield self.store.user_delete_access_tokens(user_id)
-        yield self.hs.get_pusherpool().remove_pushers_by_user(user_id)
-        yield self.store.flush_user(user_id)
+        except_access_token_id = requester.access_token_id if requester else None
+
+        try:
+            yield self.store.user_set_password_hash(user_id, password_hash)
+        except StoreError as e:
+            if e.code == 404:
+                raise SynapseError(404, "Unknown user", Codes.NOT_FOUND)
+            raise e
+        yield self.store.user_delete_access_tokens(
+            user_id, except_access_token_id
+        )
+        yield self.hs.get_pusherpool().remove_pushers_by_user(
+            user_id, except_access_token_id
+        )

     @defer.inlineCallbacks
     def add_threepid(self, user_id, medium, address, validated_at):
+        # 'Canonicalise' email addresses down to lower case.
+        # We've now moving towards the Home Server being the entity that
+        # is responsible for validating threepids used for resetting passwords
+        # on accounts, so in future Synapse will gain knowledge of specific
+        # types (mediums) of threepid. For now, we still use the existing
+        # infrastructure, but this is the start of synapse gaining knowledge
+        # of specific types of threepid (and fixes the fact that checking
+        # for the presenc eof an email address during password reset was
+        # case sensitive).
+        if medium == 'email':
+            address = address.lower()
+
         yield self.store.user_add_threepid(
             user_id, medium, address, validated_at,
             self.hs.get_clock().time_msec()
@@ -450,11 +631,18 @@ class AuthHandler(BaseHandler):
     def _save_session(self, session):
         # TODO: Persistent storage
         logger.debug("Saving session %s", session)
+        session["last_used"] = self.hs.get_clock().time_msec()
         self.sessions[session["id"]] = session
+        self._prune_sessions()

     def _remove_session(self, session):
         logger.debug("Removing session %s", session)
         del self.sessions[session["id"]]

+    def _prune_sessions(self):
+        for sid, sess in self.sessions.items():
+            last_used = 0
+            if 'last_used' in sess:
+                last_used = sess['last_used']
+            now = self.hs.get_clock().time_msec()
+            if last_used < now - AuthHandler.SESSION_EXPIRE_MS:
+                del self.sessions[sid]

     def hash(self, password):
         """Computes a secure hash of password.
@@ -465,7 +653,8 @@ class AuthHandler(BaseHandler):
         Returns:
             Hashed password (str).
         """
-        return bcrypt.hashpw(password, bcrypt.gensalt(self.bcrypt_rounds))
+        return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
+                             bcrypt.gensalt(self.bcrypt_rounds))
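The effect of the pepper can be sketched with the bcrypt module directly (Python 2 str semantics as in the surrounding code; the pepper value and round count are invented, and hashing and checking must agree on both):

    import bcrypt

    pepper = "myS3cretP3pper"   # stands in for the configured password_pepper
    hashed = bcrypt.hashpw("hunter2" + pepper, bcrypt.gensalt(12))

    # Verification re-hashes with the salt embedded in the stored hash:
    assert bcrypt.hashpw("hunter2" + pepper, hashed) == hashed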

     def validate_hash(self, password, stored_hash):
         """Validates that self.hash(password) == stored_hash.
@@ -477,4 +666,35 @@ class AuthHandler(BaseHandler):
         Returns:
             Whether self.hash(password) == stored_hash (bool).
         """
-        return bcrypt.checkpw(password, stored_hash)
+        if stored_hash:
+            return bcrypt.hashpw(password + self.hs.config.password_pepper,
+                                 stored_hash.encode('utf-8')) == stored_hash
+        else:
+            return False
+
+
+class _AccountHandler(object):
+    """A proxy object that gets passed to password auth providers so they
+    can register new users etc if necessary.
+    """
+    def __init__(self, hs, check_user_exists):
+        self.hs = hs
+
+        self._check_user_exists = check_user_exists
+
+    def check_user_exists(self, user_id):
+        """Check if user exissts.
+
+        Returns:
+            Deferred(bool)
+        """
+        return self._check_user_exists(user_id)
+
+    def register(self, localpart):
+        """Registers a new user with given localpart
+
+        Returns:
+            Deferred: a 2-tuple of (user_id, access_token)
+        """
+        reg = self.hs.get_handlers().registration_handler
+        return reg.register(localpart=localpart)
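A sketch of how a provider might use this proxy to lazily register a user it has just authenticated externally; the function, server name and flow are invented, only the two proxy methods above are relied on:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def ensure_registered(account_handler, localpart):
        # account_handler is the _AccountHandler proxy handed to the provider
        user_id = "@%s:example.com" % (localpart,)  # hypothetical server name
        exists = yield account_handler.check_user_exists(user_id)
        if not exists:
            user_id, access_token = yield account_handler.register(localpart)
        defer.returnValue(user_id)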

synapse/handlers/device.py (new file, 181 lines)
@@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.api import errors
from synapse.util import stringutils
from twisted.internet import defer
from ._base import BaseHandler

import logging

logger = logging.getLogger(__name__)


class DeviceHandler(BaseHandler):
    def __init__(self, hs):
        super(DeviceHandler, self).__init__(hs)

    @defer.inlineCallbacks
    def check_device_registered(self, user_id, device_id,
                                initial_device_display_name=None):
        """
        If the given device has not been registered, register it with the
        supplied display name.

        If no device_id is supplied, we make one up.

        Args:
            user_id (str): @user:id
            device_id (str | None): device id supplied by client
            initial_device_display_name (str | None): device display name from
                client
        Returns:
            str: device id (generated if none was supplied)
        """
        if device_id is not None:
            yield self.store.store_device(
                user_id=user_id,
                device_id=device_id,
                initial_device_display_name=initial_device_display_name,
                ignore_if_known=True,
            )
            defer.returnValue(device_id)

        # if the device id is not specified, we'll autogen one, but loop a few
        # times in case of a clash.
        attempts = 0
        while attempts < 5:
            try:
                device_id = stringutils.random_string(10).upper()
                yield self.store.store_device(
                    user_id=user_id,
                    device_id=device_id,
                    initial_device_display_name=initial_device_display_name,
                    ignore_if_known=False,
                )
                defer.returnValue(device_id)
            except errors.StoreError:
                attempts += 1

        raise errors.StoreError(500, "Couldn't generate a device ID.")

    @defer.inlineCallbacks
    def get_devices_by_user(self, user_id):
        """
        Retrieve the given user's devices

        Args:
            user_id (str):
        Returns:
            defer.Deferred: list[dict[str, X]]: info on each device
        """

        device_map = yield self.store.get_devices_by_user(user_id)

        ips = yield self.store.get_last_client_ip_by_device(
            devices=((user_id, device_id) for device_id in device_map.keys())
        )

        devices = device_map.values()
        for device in devices:
            _update_device_from_client_ips(device, ips)

        defer.returnValue(devices)

    @defer.inlineCallbacks
    def get_device(self, user_id, device_id):
        """ Retrieve the given device

        Args:
            user_id (str):
            device_id (str):

        Returns:
            defer.Deferred: dict[str, X]: info on the device
        Raises:
            errors.NotFoundError: if the device was not found
        """
        try:
            device = yield self.store.get_device(user_id, device_id)
        except errors.StoreError:
            raise errors.NotFoundError
        ips = yield self.store.get_last_client_ip_by_device(
            devices=((user_id, device_id),)
        )
        _update_device_from_client_ips(device, ips)
        defer.returnValue(device)

    @defer.inlineCallbacks
    def delete_device(self, user_id, device_id):
        """ Delete the given device

        Args:
            user_id (str):
            device_id (str):

        Returns:
            defer.Deferred:
        """

        try:
            yield self.store.delete_device(user_id, device_id)
        except errors.StoreError, e:
            if e.code == 404:
                # no match
                pass
            else:
                raise

        yield self.store.user_delete_access_tokens(
            user_id, device_id=device_id,
            delete_refresh_tokens=True,
        )

        yield self.store.delete_e2e_keys_by_device(
            user_id=user_id, device_id=device_id
        )

    @defer.inlineCallbacks
    def update_device(self, user_id, device_id, content):
        """ Update the given device

        Args:
            user_id (str):
            device_id (str):
            content (dict): body of update request

        Returns:
            defer.Deferred:
        """

        try:
            yield self.store.update_device(
                user_id,
                device_id,
                new_display_name=content.get("display_name")
            )
        except errors.StoreError, e:
            if e.code == 404:
                raise errors.NotFoundError()
            else:
                raise


def _update_device_from_client_ips(device, client_ips):
    ip = client_ips.get((device["user_id"], device["device_id"]), {})
    device.update({
        "last_seen_ts": ip.get("last_seen"),
        "last_seen_ip": ip.get("ip"),
    })
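A usage sketch for the handler above, as the login path is expected to call it (an inlineCallbacks context is assumed; the user ID and display name are invented):

    # With device_id=None a fresh random ID is generated and returned:
    device_id = yield device_handler.check_device_registered(
        user_id="@alice:example.com",
        device_id=None,
        initial_device_display_name="Alice's phone",
    )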

synapse/handlers/devicemessage.py (new file, 117 lines)
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.types import get_domain_from_id
from synapse.util.stringutils import random_string


logger = logging.getLogger(__name__)


class DeviceMessageHandler(object):

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.federation = hs.get_replication_layer()

        self.federation.register_edu_handler(
            "m.direct_to_device", self.on_direct_to_device_edu
        )

    @defer.inlineCallbacks
    def on_direct_to_device_edu(self, origin, content):
        local_messages = {}
        sender_user_id = content["sender"]
        if origin != get_domain_from_id(sender_user_id):
            logger.warn(
                "Dropping device message from %r with spoofed sender %r",
                origin, sender_user_id
            )
        message_type = content["type"]
        message_id = content["message_id"]
        for user_id, by_device in content["messages"].items():
            messages_by_device = {
                device_id: {
                    "content": message_content,
                    "type": message_type,
                    "sender": sender_user_id,
                }
                for device_id, message_content in by_device.items()
            }
            if messages_by_device:
                local_messages[user_id] = messages_by_device

        stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
            origin, message_id, local_messages
        )

        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )

    @defer.inlineCallbacks
    def send_device_message(self, sender_user_id, message_type, messages):

        local_messages = {}
        remote_messages = {}
        for user_id, by_device in messages.items():
            if self.is_mine_id(user_id):
                messages_by_device = {
                    device_id: {
                        "content": message_content,
                        "type": message_type,
                        "sender": sender_user_id,
                    }
                    for device_id, message_content in by_device.items()
                }
                if messages_by_device:
                    local_messages[user_id] = messages_by_device
            else:
                destination = get_domain_from_id(user_id)
                remote_messages.setdefault(destination, {})[user_id] = by_device

        message_id = random_string(16)

        remote_edu_contents = {}
        for destination, messages in remote_messages.items():
            remote_edu_contents[destination] = {
                "messages": messages,
                "sender": sender_user_id,
                "type": message_type,
                "message_id": message_id,
            }

        stream_id = yield self.store.add_messages_to_device_inbox(
            local_messages, remote_edu_contents
        )

        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )

        for destination in remote_messages.keys():
            # Enqueue a new federation transaction to send the new
            # device messages to each remote destination.
            self.federation.send_device_messages(destination)
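For reference, the m.direct_to_device EDU content assembled above has this shape; a sketch with invented IDs and an invented message type:

    # Hypothetical EDU content as sent to one remote destination:
    edu_content = {
        "sender": "@alice:example.com",
        "type": "m.example.message",        # invented message type
        "message_id": "hiuYJnTFiIHllNZQ",   # invented 16-char random string
        "messages": {
            "@bob:remote.example": {
                "BOBSDEVICE": {"body": "hello"},  # per-device message content
            },
        },
    }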

@@ -17,9 +17,9 @@
 from twisted.internet import defer
 from ._base import BaseHandler

-from synapse.api.errors import SynapseError, Codes, CodeMessageException
+from synapse.api.errors import SynapseError, Codes, CodeMessageException, AuthError
 from synapse.api.constants import EventTypes
-from synapse.types import RoomAlias
+from synapse.types import RoomAlias, UserID, get_domain_from_id

 import logging
 import string
@@ -32,13 +32,16 @@ class DirectoryHandler(BaseHandler):
     def __init__(self, hs):
         super(DirectoryHandler, self).__init__(hs)

+        self.state = hs.get_state_handler()
+        self.appservice_handler = hs.get_application_service_handler()
+
         self.federation = hs.get_replication_layer()
         self.federation.register_query_handler(
             "directory", self.on_directory_query
         )

     @defer.inlineCallbacks
-    def _create_association(self, room_alias, room_id, servers=None):
+    def _create_association(self, room_alias, room_id, servers=None, creator=None):
         # general association creation for both human users and app services

         for wchar in string.whitespace:
@@ -52,7 +55,8 @@ class DirectoryHandler(BaseHandler):
         # TODO(erikj): Add transactions.
         # TODO(erikj): Check if there is a current association.
         if not servers:
-            servers = yield self.store.get_joined_hosts_for_room(room_id)
+            users = yield self.state.get_current_user_in_room(room_id)
+            servers = set(get_domain_from_id(u) for u in users)

         if not servers:
             raise SynapseError(400, "Failed to get server list")
@@ -60,7 +64,8 @@ class DirectoryHandler(BaseHandler):
         yield self.store.create_room_alias_association(
             room_alias,
             room_id,
-            servers
+            servers,
+            creator=creator,
         )

     @defer.inlineCallbacks
@@ -77,7 +82,7 @@ class DirectoryHandler(BaseHandler):
                 400, "This alias is reserved by an application service.",
                 errcode=Codes.EXCLUSIVE
             )
-        yield self._create_association(room_alias, room_id, servers)
+        yield self._create_association(room_alias, room_id, servers, creator=user_id)

     @defer.inlineCallbacks
     def create_appservice_association(self, service, room_alias, room_id,
@@ -92,10 +97,14 @@ class DirectoryHandler(BaseHandler):
         yield self._create_association(room_alias, room_id, servers)

     @defer.inlineCallbacks
-    def delete_association(self, user_id, room_alias):
+    def delete_association(self, requester, user_id, room_alias):
         # association deletion for human users

-        # TODO Check if server admin
+        can_delete = yield self._user_can_delete_alias(room_alias, user_id)
+        if not can_delete:
+            raise AuthError(
+                403, "You don't have permission to delete the alias.",
+            )

         can_delete = yield self.can_modify_alias(
             room_alias,
@@ -107,7 +116,25 @@ class DirectoryHandler(BaseHandler):
                 errcode=Codes.EXCLUSIVE
             )

-        yield self._delete_association(room_alias)
+        room_id = yield self._delete_association(room_alias)
+
+        try:
+            yield self.send_room_alias_update_event(
+                requester,
+                requester.user.to_string(),
+                room_id
+            )
+
+            yield self._update_canonical_alias(
+                requester,
+                requester.user.to_string(),
+                room_id,
+                room_alias,
+            )
+        except AuthError as e:
+            logger.info("Failed to update alias events: %s", e)
+
+        defer.returnValue(room_id)

     @defer.inlineCallbacks
     def delete_appservice_association(self, service, room_alias):
@@ -124,11 +151,9 @@ class DirectoryHandler(BaseHandler):
         if not self.hs.is_mine(room_alias):
             raise SynapseError(400, "Room alias must be local")

-        yield self.store.delete_room_alias(room_alias)
+        room_id = yield self.store.delete_room_alias(room_alias)

-        # TODO - Looks like _update_room_alias_event has never been implemented
-        # if room_id:
-        #     yield self._update_room_alias_events(user_id, room_id)
+        defer.returnValue(room_id)

     @defer.inlineCallbacks
     def get_association(self, room_alias):
@@ -169,7 +194,8 @@ class DirectoryHandler(BaseHandler):
                 Codes.NOT_FOUND
             )

-        extra_servers = yield self.store.get_joined_hosts_for_room(room_id)
+        users = yield self.state.get_current_user_in_room(room_id)
+        extra_servers = set(get_domain_from_id(u) for u in users)
         servers = set(extra_servers) | set(servers)

         # If this server is in the list of servers, return it first.
@@ -212,17 +238,44 @@ class DirectoryHandler(BaseHandler):
         )

     @defer.inlineCallbacks
-    def send_room_alias_update_event(self, user_id, room_id):
+    def send_room_alias_update_event(self, requester, user_id, room_id):
         aliases = yield self.store.get_aliases_for_room(room_id)

         msg_handler = self.hs.get_handlers().message_handler
-        yield msg_handler.create_and_send_event({
-            "type": EventTypes.Aliases,
-            "state_key": self.hs.hostname,
-            "room_id": room_id,
-            "sender": user_id,
-            "content": {"aliases": aliases},
-        }, ratelimit=False)
+        yield msg_handler.create_and_send_nonmember_event(
+            requester,
+            {
+                "type": EventTypes.Aliases,
+                "state_key": self.hs.hostname,
+                "room_id": room_id,
+                "sender": user_id,
+                "content": {"aliases": aliases},
+            },
+            ratelimit=False
+        )
+
+    @defer.inlineCallbacks
+    def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
+        alias_event = yield self.state.get_current_state(
+            room_id, EventTypes.CanonicalAlias, ""
+        )
+
+        alias_str = room_alias.to_string()
+        if not alias_event or alias_event.content.get("alias", "") != alias_str:
+            return
+
+        msg_handler = self.hs.get_handlers().message_handler
+        yield msg_handler.create_and_send_nonmember_event(
+            requester,
+            {
+                "type": EventTypes.CanonicalAlias,
+                "state_key": "",
+                "room_id": room_id,
+                "sender": user_id,
+                "content": {},
+            },
+            ratelimit=False
+        )

     @defer.inlineCallbacks
     def get_association_from_room_alias(self, room_alias):
@@ -231,17 +284,16 @@ class DirectoryHandler(BaseHandler):
         )
         if not result:
             # Query AS to see if it exists
-            as_handler = self.hs.get_handlers().appservice_handler
+            as_handler = self.appservice_handler
             result = yield as_handler.query_room_alias_exists(room_alias)
         defer.returnValue(result)

-    @defer.inlineCallbacks
     def can_modify_alias(self, alias, user_id=None):
         # Any application service "interested" in an alias they are regexing on
         # can modify the alias.
         # Users can only modify the alias if ALL the interested services have
         # non-exclusive locks on the alias (or there are no interested services)
-        services = yield self.store.get_app_services()
+        services = self.store.get_app_services()
         interested_services = [
             s for s in services if s.is_interested_in_alias(alias.to_string())
         ]
@@ -249,11 +301,41 @@ class DirectoryHandler(BaseHandler):
         for service in interested_services:
             if user_id == service.sender:
                 # this user IS the app service so they can do whatever they like
-                defer.returnValue(True)
-                return
+                return defer.succeed(True)
             elif service.is_exclusive_alias(alias.to_string()):
                 # another service has an exclusive lock on this alias.
-                defer.returnValue(False)
-                return
+                return defer.succeed(False)
         # either no interested services, or no service with an exclusive lock
-        defer.returnValue(True)
+        return defer.succeed(True)
+
+    @defer.inlineCallbacks
+    def _user_can_delete_alias(self, alias, user_id):
+        creator = yield self.store.get_room_alias_creator(alias.to_string())
+
+        if creator and creator == user_id:
+            defer.returnValue(True)
+
+        is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
+        defer.returnValue(is_admin)
+
+    @defer.inlineCallbacks
+    def edit_published_room_list(self, requester, room_id, visibility):
+        """Edit the entry of the room in the published room list.
+
+        requester
+        room_id (str)
+        visibility (str): "public" or "private"
+        """
+        if requester.is_guest:
+            raise AuthError(403, "Guests cannot edit the published room list")
+
+        if visibility not in ["public", "private"]:
+            raise SynapseError(400, "Invalid visibility setting")
+
+        room = yield self.store.get_room(room_id)
+        if room is None:
+            raise SynapseError(400, "Unknown room")
+
+        yield self.auth.check_can_change_room_list(room_id, requester.user)
+
+        yield self.store.set_room_is_public(room_id, visibility == "public")
synapse/handlers/e2e_keys.py (new file, 279 lines)
@@ -0,0 +1,279 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ujson as json
import logging

from canonicaljson import encode_canonical_json
from twisted.internet import defer

from synapse.api.errors import SynapseError, CodeMessageException
from synapse.types import get_domain_from_id
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

logger = logging.getLogger(__name__)


class E2eKeysHandler(object):
    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.federation = hs.get_replication_layer()
        self.device_handler = hs.get_device_handler()
        self.is_mine_id = hs.is_mine_id
        self.clock = hs.get_clock()

        # doesn't really work as part of the generic query API, because the
        # query request requires an object POST, but we abuse the
        # "query handler" interface.
        self.federation.register_query_handler(
            "client_keys", self.on_federation_query_client_keys
        )

    @defer.inlineCallbacks
    def query_devices(self, query_body, timeout):
        """ Handle a device key query from a client

        {
            "device_keys": {
                "<user_id>": ["<device_id>"]
            }
        }
        ->
        {
            "device_keys": {
                "<user_id>": {
                    "<device_id>": {
                        ...
                    }
                }
            }
        }
        """
        device_keys_query = query_body.get("device_keys", {})

        # separate users by domain.
        # make a map from domain to user_id to device_ids
        local_query = {}
        remote_queries = {}

        for user_id, device_ids in device_keys_query.items():
            if self.is_mine_id(user_id):
                local_query[user_id] = device_ids
            else:
                domain = get_domain_from_id(user_id)
                remote_queries.setdefault(domain, {})[user_id] = device_ids

        # do the queries
        failures = {}
        results = {}
        if local_query:
            local_result = yield self.query_local_devices(local_query)
            for user_id, keys in local_result.items():
                if user_id in local_query:
                    results[user_id] = keys

        @defer.inlineCallbacks
        def do_remote_query(destination):
            destination_query = remote_queries[destination]
            try:
                limiter = yield get_retry_limiter(
                    destination, self.clock, self.store
                )
                with limiter:
                    remote_result = yield self.federation.query_client_keys(
                        destination,
                        {"device_keys": destination_query},
                        timeout=timeout
                    )

                for user_id, keys in remote_result["device_keys"].items():
                    if user_id in destination_query:
                        results[user_id] = keys

            except CodeMessageException as e:
                failures[destination] = {
                    "status": e.code, "message": e.message
                }
            except NotRetryingDestination as e:
                failures[destination] = {
                    "status": 503, "message": "Not ready for retry",
                }

        yield preserve_context_over_deferred(defer.gatherResults([
            preserve_fn(do_remote_query)(destination)
            for destination in remote_queries
        ]))

        defer.returnValue({
            "device_keys": results, "failures": failures,
        })

    @defer.inlineCallbacks
    def query_local_devices(self, query):
        """Get E2E device keys for local users

        Args:
            query (dict[string, list[string]|None): map from user_id to a list
                 of devices to query (None for all devices)

        Returns:
            defer.Deferred: (resolves to dict[string, dict[string, dict]]):
                 map from user_id -> device_id -> device details
        """
        local_query = []

        result_dict = {}
        for user_id, device_ids in query.items():
            if not self.is_mine_id(user_id):
                logger.warning("Request for keys for non-local user %s",
                               user_id)
                raise SynapseError(400, "Not a user here")

            if not device_ids:
                local_query.append((user_id, None))
            else:
                for device_id in device_ids:
                    local_query.append((user_id, device_id))

            # make sure that each queried user appears in the result dict
            result_dict[user_id] = {}

        results = yield self.store.get_e2e_device_keys(local_query)

        # Build the result structure, un-jsonify the results, and add the
        # "unsigned" section
        for user_id, device_keys in results.items():
            for device_id, device_info in device_keys.items():
                r = json.loads(device_info["key_json"])
                r["unsigned"] = {}
                display_name = device_info["device_display_name"]
                if display_name is not None:
                    r["unsigned"]["device_display_name"] = display_name
                result_dict[user_id][device_id] = r

        defer.returnValue(result_dict)

    @defer.inlineCallbacks
    def on_federation_query_client_keys(self, query_body):
        """ Handle a device key query from a federated server
        """
        device_keys_query = query_body.get("device_keys", {})
        res = yield self.query_local_devices(device_keys_query)
        defer.returnValue({"device_keys": res})

    @defer.inlineCallbacks
    def claim_one_time_keys(self, query, timeout):
        local_query = []
        remote_queries = {}

        for user_id, device_keys in query.get("one_time_keys", {}).items():
            if self.is_mine_id(user_id):
                for device_id, algorithm in device_keys.items():
                    local_query.append((user_id, device_id, algorithm))
            else:
                domain = get_domain_from_id(user_id)
                remote_queries.setdefault(domain, {})[user_id] = device_keys

        results = yield self.store.claim_e2e_one_time_keys(local_query)

        json_result = {}
        failures = {}
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        @defer.inlineCallbacks
        def claim_client_keys(destination):
            device_keys = remote_queries[destination]
            try:
                limiter = yield get_retry_limiter(
                    destination, self.clock, self.store
                )
                with limiter:
                    remote_result = yield self.federation.claim_client_keys(
                        destination,
                        {"one_time_keys": device_keys},
                        timeout=timeout
                    )
                    for user_id, keys in remote_result["one_time_keys"].items():
                        if user_id in device_keys:
                            json_result[user_id] = keys
            except CodeMessageException as e:
                failures[destination] = {
                    "status": e.code, "message": e.message
                }
            except NotRetryingDestination as e:
                failures[destination] = {
                    "status": 503, "message": "Not ready for retry",
                }

        yield preserve_context_over_deferred(defer.gatherResults([
            preserve_fn(claim_client_keys)(destination)
            for destination in remote_queries
        ]))

        defer.returnValue({
            "one_time_keys": json_result,
            "failures": failures
        })

    @defer.inlineCallbacks
    def upload_keys_for_user(self, user_id, device_id, keys):
        time_now = self.clock.time_msec()

        # TODO: Validate the JSON to make sure it has the right keys.
        device_keys = keys.get("device_keys", None)
        if device_keys:
            logger.info(
                "Updating device_keys for device %r for user %s at %d",
                device_id, user_id, time_now
            )
            # TODO: Sign the JSON with the server key
            yield self.store.set_e2e_device_keys(
                user_id, device_id, time_now,
                encode_canonical_json(device_keys)
            )

        one_time_keys = keys.get("one_time_keys", None)
        if one_time_keys:
            logger.info(
                "Adding %d one_time_keys for device %r for user %r at %d",
                len(one_time_keys), device_id, user_id, time_now
            )
            key_list = []
            for key_id, key_json in one_time_keys.items():
                algorithm, key_id = key_id.split(":")
                key_list.append((
                    algorithm, key_id, encode_canonical_json(key_json)
                ))

            yield self.store.add_e2e_one_time_keys(
                user_id, device_id, time_now, key_list
            )

        # the device should have been registered already, but it may have been
        # deleted due to a race with a DELETE request. Or we may be using an
        # old access_token without an associated device_id. Either way, we
        # need to double-check the device is registered to avoid ending up with
        # keys without a corresponding device.
        self.device_handler.check_device_registered(user_id, device_id)

        result = yield self.store.count_e2e_one_time_keys(user_id, device_id)

        defer.returnValue({"one_time_key_counts": result})

@@ -18,7 +18,8 @@ from twisted.internet import defer
 from synapse.util.logutils import log_function
 from synapse.types import UserID
 from synapse.events.utils import serialize_event
-from synapse.util.logcontext import preserve_context_over_fn
+from synapse.api.constants import Membership, EventTypes
+from synapse.events import EventBase

 from ._base import BaseHandler

@@ -29,20 +30,6 @@ import random
 logger = logging.getLogger(__name__)


-def started_user_eventstream(distributor, user):
-    return preserve_context_over_fn(
-        distributor.fire,
-        "started_user_eventstream", user
-    )
-
-
-def stopped_user_eventstream(distributor, user):
-    return preserve_context_over_fn(
-        distributor.fire,
-        "stopped_user_eventstream", user
-    )
-
-
 class EventStreamHandler(BaseHandler):

     def __init__(self, hs):
@@ -60,61 +47,7 @@ class EventStreamHandler(BaseHandler):
         self.clock = hs.get_clock()

         self.notifier = hs.get_notifier()
-
-    @defer.inlineCallbacks
-    def started_stream(self, user):
-        """Tells the presence handler that we have started an eventstream for
-        the user:
-
-        Args:
-            user (User): The user who started a stream.
-        Returns:
-            A deferred that completes once their presence has been updated.
-        """
-        if user not in self._streams_per_user:
-            # Make sure we set the streams per user to 1 here rather than
-            # setting it to zero and incrementing the value below.
-            # Otherwise this may race with stopped_stream causing the
-            # user to be erased from the map before we have a chance
-            # to increment it.
-            self._streams_per_user[user] = 1
-            if user in self._stop_timer_per_user:
-                try:
-                    self.clock.cancel_call_later(
-                        self._stop_timer_per_user.pop(user)
-                    )
-                except:
-                    logger.exception("Failed to cancel event timer")
-            else:
-                yield started_user_eventstream(self.distributor, user)
-        else:
-            self._streams_per_user[user] += 1
-
-    def stopped_stream(self, user):
-        """If there are no streams for a user this starts a timer that will
-        notify the presence handler that we haven't got an event stream for
-        the user unless the user starts a new stream in 30 seconds.
-
-        Args:
-            user (User): The user who stopped a stream.
-        """
-        self._streams_per_user[user] -= 1
-        if not self._streams_per_user[user]:
-            del self._streams_per_user[user]
-
-            # 30 seconds of grace to allow the client to reconnect again
-            # before we think they're gone
-            def _later():
-                logger.debug("_later stopped_user_eventstream %s", user)
-
-                self._stop_timer_per_user.pop(user, None)
-
-                return stopped_user_eventstream(self.distributor, user)
-
-            logger.debug("Scheduling _later: for %s", user)
-            self._stop_timer_per_user[user] = (
-                self.clock.call_later(30, _later)
-            )
+        self.state = hs.get_state_handler()

     @defer.inlineCallbacks
     @log_function
@@ -126,11 +59,12 @@ class EventStreamHandler(BaseHandler):
         If `only_keys` is not None, events from keys will be sent down.
         """
         auth_user = UserID.from_string(auth_user_id)
+        presence_handler = self.hs.get_presence_handler()

-        try:
-            if affect_presence:
-                yield self.started_stream(auth_user)
-
+        context = yield presence_handler.user_syncing(
+            auth_user_id, affect_presence=affect_presence,
+        )
+        with context:
             if timeout:
                 # If they've set a timeout set a minimum limit.
                 timeout = max(timeout, 500)
@@ -145,6 +79,34 @@ class EventStreamHandler(BaseHandler):
                 is_guest=is_guest, explicit_room_id=room_id
             )

+            # When the user joins a new room, or another user joins a currently
+            # joined room, we need to send down presence for those users.
+            to_add = []
+            for event in events:
+                if not isinstance(event, EventBase):
+                    continue
+                if event.type == EventTypes.Member:
+                    if event.membership != Membership.JOIN:
+                        continue
+                    # Send down presence.
+                    if event.state_key == auth_user_id:
+                        # Send down presence for everyone in the room.
+                        users = yield self.state.get_current_user_in_room(event.room_id)
+                        states = yield presence_handler.get_states(
+                            users,
+                            as_event=True,
+                        )
+                        to_add.extend(states)
+                    else:
+                        ev = yield presence_handler.get_state(
+                            UserID.from_string(event.state_key),
+                            as_event=True,
+                        )
+                        to_add.append(ev)
+
+            events.extend(to_add)
+
             time_now = self.clock.time_msec()

             chunks = [
@@ -159,10 +121,6 @@ class EventStreamHandler(BaseHandler):

             defer.returnValue(chunk)

-        finally:
-            if affect_presence:
-                self.stopped_stream(auth_user)
-

 class EventHandler(BaseHandler):
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.