Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-07 01:20:16 +00:00)

Compare commits: erikj/urle...v0.33.3 (1395 commits)
[Commit list omitted: the compare view enumerates the 1395 commits in this range by abbreviated SHA1 only; no author, date, or commit-message information survives.]
.circleci/config.yml — 48 lines (new file)

@@ -0,0 +1,48 @@
version: 2
jobs:
  sytestpy2:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
  sytestpy2postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy2
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
      - store_artifacts:
          path: ~/project/logs
          destination: logs
  sytestpy3:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs
  sytestpy3postgres:
    machine: true
    steps:
      - checkout
      - run: docker pull matrixdotorg/sytest-synapsepy3
      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
      - store_artifacts:
          path: ~/project/logs
          destination: logs

workflows:
  version: 2
  build:
    jobs:
      - sytestpy2
      - sytestpy2postgres
      # Currently broken while the Python 3 port is incomplete
      # - sytestpy3
      # - sytestpy3postgres
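The four jobs above differ only in which sytest image they pull and whether POSTGRES=1 is set, so the same run is easy to reproduce outside CI. A minimal sketch, assuming Docker is installed and the current directory is a synapse checkout (the "\:" in the config is CircleCI escaping; a plain ":" is used locally):

```
# Mirror the sytestpy2 job locally.
mkdir -p logs
docker pull matrixdotorg/sytest-synapsepy2
docker run --rm -it -v $(pwd):/src -v $(pwd)/logs:/logs matrixdotorg/sytest-synapsepy2
# Add -e POSTGRES=1 before the image name to mirror sytestpy2postgres instead.
```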
.dockerignore — 8 lines (new file)

@@ -0,0 +1,8 @@
Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini
synctl
.git/*
.tox/*
.github/ISSUE_TEMPLATE.md — 5 lines changed (vendored)

@@ -27,8 +27,9 @@ Describe here the problem that you are experiencing, or the feature you are requ

 Describe how what happens differs from what you expected.

-If you can identify any relevant log snippets from _homeserver.log_, please include
-those here (please be careful to remove any personal or private data):
+<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
+those (please be careful to remove any personal or private data). Please surround them with
+``` (three backticks, on a line on their own), so that they are formatted legibly. -->

 ### Version information
.gitignore — 6 lines changed (vendored)

@@ -1,5 +1,6 @@
 *.pyc
 .*.swp
 *~

+.DS_Store
 _trial_temp/
@@ -13,6 +14,7 @@ docs/build/
 cmdclient_config.json
 homeserver*.db
 homeserver*.log
+homeserver*.log.*
 homeserver*.pid
 homeserver*.yaml
@@ -32,6 +34,7 @@ demo/media_store.*
 demo/etc

 uploads
+cache

 .idea/
 media_store/
@@ -39,6 +42,8 @@ media_store/
 *.tac

 build/
+venv/
+venv*/

 localhost-800*/
 static/client/register/register_config.js
@@ -48,3 +53,4 @@ env/
 *.config

 .vscode/
+.ropeproject/
.travis.yml — 39 lines changed

@@ -1,14 +1,43 @@
 sudo: false
 language: python
-python: 2.7

 # tell travis to cache ~/.cache/pip
 cache: pip

-env:
-  - TOX_ENV=packaging
-  - TOX_ENV=pep8
-  - TOX_ENV=py27
+before_script:
+  - git remote set-branches --add origin develop
+  - git fetch origin develop
+
+services:
+  - postgresql
+
+matrix:
+  fast_finish: true
+  include:
+  - python: 2.7
+    env: TOX_ENV=packaging
+
+  - python: 2.7
+    env: TOX_ENV=pep8
+
+  - python: 2.7
+    env: TOX_ENV=py27
+
+  - python: 2.7
+    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+
+  - python: 3.6
+    env: TOX_ENV=py36
+
+  - python: 3.6
+    env: TOX_ENV=check_isort
+
+  - python: 3.6
+    env: TOX_ENV=check-newsfragment
+
+  allow_failures:
+  - python: 2.7
+    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"

 install:
   - pip install tox
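The build matrix above only selects tox environments, so the same checks can be run locally. A sketch, assuming a synapse checkout whose tox.ini defines these environments and reads TRIAL_FLAGS:

```
pip install tox
tox -e pep8                              # style checks
tox -e py27                              # Python 2.7 test suite
TRIAL_FLAGS="-j 4" tox -e py27-postgres  # PostgreSQL run with 4 trial workers
```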
AUTHORS.rst

@@ -60,3 +60,9 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>

 Christoph Witzany <christoph at web.crofting.com>
  * Add LDAP support for authentication
+
+Pierre Jaury <pierre at jaury.eu>
+ * Docker packaging
+
+Serban Constantin <serban.constantin at gmail dot com>
+ * Small bug fix
CHANGES.md — 2634 lines (new file): file diff suppressed because it is too large

CHANGES.rst — 2406 lines changed: file diff suppressed because it is too large
CONTRIBUTING.rst

@@ -30,11 +30,11 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.

-We use `Jenkins <http://matrix.org/jenkins>`_ and
+We use `Jenkins <http://matrix.org/jenkins>`_ and
 `Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
-integration. All pull requests to synapse get automatically tested by Travis;
-the Jenkins builds require an adminstrator to start them. If your change
-breaks the build, this will be shown in github, so please keep an eye on the
+integration. All pull requests to synapse get automatically tested by Travis;
+the Jenkins builds require an adminstrator to start them. If your change
+breaks the build, this will be shown in github, so please keep an eye on the
 pull request for feedback.

@@ -48,6 +48,26 @@ Please ensure your changes match the cosmetic style of the existing project,
 and **never** mix cosmetic and functional changes in the same commit, as it
 makes it horribly hard to review otherwise.

+Changelog
+~~~~~~~~~
+
+All changes, even minor ones, need a corresponding changelog / newsfragment
+entry. These are managed by Towncrier
+(https://github.com/hawkowl/towncrier).
+
+To create a changelog entry, make a new file in the ``changelog.d``
+file named in the format of ``PRnumber.type``. The type can be
+one of ``feature``, ``bugfix``, ``removal`` (also used for
+deprecations), or ``misc`` (for internal-only changes). The content of
+the file is your changelog entry, which can contain RestructuredText
+formatting. A note of contributors is welcomed in changelogs for
+non-misc changes (the content of misc changes is not displayed).
+
+For example, a fix in PR #1234 would have its changelog entry in
+``changelog.d/1234.bugfix``, and contain content like "The security levels of
+Florbs are now validated when recieved over federation. Contributed by Jane
+Matrix".
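Concretely, the changelog workflow added above amounts to committing one small file per pull request. A sketch using the hypothetical PR #1234 from the text:

```
# Create and stage a newsfragment named PRnumber.type, as described above.
echo "The security levels of Florbs are now validated when received over federation. Contributed by Jane Matrix." > changelog.d/1234.bugfix
git add changelog.d/1234.bugfix
```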
 Attribution
 ~~~~~~~~~~~
@@ -105,16 +125,20 @@ the contribution or otherwise have the right to contribute it to Matrix::
     personal information I submit with it, including my sign-off) is
     maintained indefinitely and may be redistributed consistent with
     this project or the open source license(s) involved.

 If you agree to this for your contribution, then all that's needed is to
 include the line in your commit or pull request comment::

     Signed-off-by: Your Name <your@email.example.org>

-...using your real name; unfortunately pseudonyms and anonymous contributions
-can't be accepted. Git makes this trivial - just use the -s flag when you do
-``git commit``, having first set ``user.name`` and ``user.email`` git configs
-(which you should have done anyway :)
+We accept contributions under a legally identifiable name, such as
+your name on government documentation or common-law names (names
+claimed by legitimate usage or repute). Unfortunately, we cannot
+accept anonymous contributions at this time.
+
+Git allows you to add this signoff automatically when using the ``-s``
+flag to ``git commit``, which uses the name and email set in your
+``user.name`` and ``user.email`` git configs.
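A sketch of the sign-off flow the new wording describes; the name, email, and commit message are placeholders:

```
git config user.name "Your Name"
git config user.email "your@email.example.org"
git commit -s -m "Describe your change"  # -s appends the Signed-off-by line
```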
 Conclusion
 ~~~~~~~~~~
MANIFEST.in

@@ -2,6 +2,7 @@ include synctl
 include LICENSE
 include VERSION
 include *.rst
+include *.md
 include demo/README
 include demo/demo.tls.dh
 include demo/*.py
@@ -25,7 +26,14 @@ recursive-include synapse/static *.js
 exclude jenkins.sh
 exclude jenkins*.sh
+exclude jenkins*
+exclude Dockerfile
+exclude .dockerignore
 recursive-exclude jenkins *.sh

+include pyproject.toml
+recursive-include changelog.d *

 prune .github
 prune demo/etc
+prune docker
+prune .circleci
README.rst — 46 lines changed

@@ -71,7 +71,7 @@ We'd like to invite you to join #matrix:matrix.org (via
 https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
 at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
 `APIs <https://matrix.org/docs/api>`_ and `Client SDKs
-<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
+<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.

 Thanks for using Matrix!

@@ -157,11 +157,19 @@ if you prefer.

 In case of problems, please see the _`Troubleshooting` section below.

-Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
-above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
+There is an offical synapse image available at
+https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
+the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
+this including configuration options is available in the README on
+hub.docker.com.
+
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+https://hub.docker.com/r/avhost/docker-matrix/tags/

 Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
-tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
+tested with VirtualBox/AWS/DigitalOcean - see
+https://github.com/EMnify/matrix-synapse-auto-deploy
 for details.

 Configuring synapse
@@ -282,7 +290,7 @@ Connecting to Synapse from a client

 The easiest way to try out your new Synapse installation is by connecting to it
 from a web client. The easiest option is probably the one at
-http://riot.im/app. You will need to specify a "Custom server" when you log on
+https://riot.im/app. You will need to specify a "Custom server" when you log on
 or register: set this to ``https://domain.tld`` if you setup a reverse proxy
 following the recommended setup, or ``https://localhost:8448`` - remember to specify the
 port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
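A quick way to confirm the server answers at the address you would give a client is to hit the client-server API. A sketch, assuming a local install on port 8448 with a self-signed certificate (-k disables certificate verification and is only acceptable for local testing):

```
curl -k https://localhost:8448/_matrix/client/versions
```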
@@ -328,7 +336,7 @@ Security Note
 =============

 Matrix serves raw user generated data in some APIs - specifically the `content
-repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.

 Whilst we have tried to mitigate against possible XSS attacks (e.g.
 https://github.com/matrix-org/synapse/pull/1021) we recommend running

@@ -347,7 +355,7 @@ Platform-Specific Instructions
 Debian
 ------

-Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
+Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
 Note that these packages do not include a client - choose one from
 https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)

@@ -361,6 +369,19 @@ Synapse is in the Fedora repositories as ``matrix-synapse``::
 Oleg Girko provides Fedora RPMs at
 https://obs.infoserver.lv/project/monitor/matrix-synapse

+OpenSUSE
+--------
+
+Synapse is in the OpenSUSE repositories as ``matrix-synapse``::
+
+    sudo zypper install matrix-synapse
+
+SUSE Linux Enterprise Server
+----------------------------
+
+Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
+https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
+
 ArchLinux
 ---------
@@ -523,7 +544,7 @@ Troubleshooting Running
 -----------------------

 If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
+to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
 encryption and digital signatures.
 Unfortunately PyNACL currently has a few issues
 (https://github.com/pyca/pynacl/issues/53) and
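The manual upgrade mentioned above is a single pip step; a sketch, assuming it runs inside the virtualenv that synapse uses (the PyPI package name is pynacl):

```
pip install --upgrade pynacl
```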
@@ -614,6 +635,9 @@ should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
     $ dig -t srv _matrix._tcp.example.com
     _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.

+Note that the server hostname cannot be an alias (CNAME record): it has to point
+directly to the server hosting the synapse instance.
+
 You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
 its user-ids, by setting ``server_name``::

@@ -668,8 +692,8 @@ useful just for development purposes. See `<demo/README>`_.
 Using PostgreSQL
 ================

-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
 traditionally used for convenience and simplicity.

 The advantages of Postgres include:

@@ -693,7 +717,7 @@ Using a reverse proxy with Synapse
 It is recommended to put a reverse proxy such as
 `nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
 `Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
-`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
+`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
 doing so is that it means that you can expose the default https port (443) to
 Matrix clients without needing to run Synapse with root privileges.
changelog.d/.gitignore — 1 line (new file, vendored)

@@ -0,0 +1 @@
!.gitignore
contrib/README.rst — 10 lines (new file)

@@ -0,0 +1,10 @@
Community Contributions
=======================

Everything in this directory are projects submitted by the community that may be useful
to others. As such, the project maintainers cannot guarantee support, stability
or backwards compatibility of these projects.

Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.
contrib/docker/README.md — 41 lines (new file)

@@ -0,0 +1,41 @@
# Synapse Docker

### Automated configuration

It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.

Read the section about environment variables and set at least mandatory variables,
then run the server:

```
docker-compose up -d
```

If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.

### Manual configuration

A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example,
please comment/uncomment sections that are not suitable for your usecase.

Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:

```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```

Then, customize your configuration and run the server:

```
docker-compose up -d
```

### More information

For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
contrib/docker/docker-compose.yml — 50 lines (new file)

@@ -0,0 +1,50 @@
# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.

version: '3'

services:

  synapse:
    build: ../..
    image: docker.io/matrixdotorg/synapse:latest
    # Since snyapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    environment:
      - SYNAPSE_SERVER_NAME=my.matrix.host
      - SYNAPSE_REPORT_STATS=no
      - SYNAPSE_ENABLE_REGISTRATION=yes
      - SYNAPSE_LOG_LEVEL=INFO
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following, you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy, here is an example for traefik:
    labels:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.Host
      - traefik.port=8448

  db:
    image: docker.io/postgres:10-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postfesql/data
contrib/grafana/README.md — 6 lines (new file)

@@ -0,0 +1,6 @@
# Using the Synapse Grafana dashboard

0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules

contrib/grafana/synapse.json — 4969 lines (new file): file diff suppressed because it is too large
contrib/graph (event-graphing script)

@@ -22,6 +22,8 @@ import argparse
 from synapse.events import FrozenEvent
 from synapse.util.frozenutils import unfreeze

+from six import string_types
+

 def make_graph(file_name, room_id, file_prefix, limit):
     print "Reading lines"
@@ -58,7 +60,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
     for key, value in unfreeze(event.get_dict()["content"]).items():
         if value is None:
             value = "<null>"
-        elif isinstance(value, basestring):
+        elif isinstance(value, string_types):
             pass
         else:
             value = json.dumps(value)
contrib/prometheus console template (synapse.html)

@@ -202,11 +202,11 @@
 <h1>Requests</h1>

 <h3>Requests by Servlet</h3>
-<div id="synapse_http_server_requests_servlet"></div>
+<div id="synapse_http_server_request_count_servlet"></div>
 <script>
 new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_requests_servlet"),
-  expr: "rate(synapse_http_server_requests:servlet[2m])",
+  node: document.querySelector("#synapse_http_server_request_count_servlet"),
+  expr: "rate(synapse_http_server_request_count:servlet[2m])",
   name: "[[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -215,11 +215,11 @@
 })
 </script>
 <h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
-<div id="synapse_http_server_requests_servlet_minus_events"></div>
+<div id="synapse_http_server_request_count_servlet_minus_events"></div>
 <script>
 new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_requests_servlet_minus_events"),
-  expr: "rate(synapse_http_server_requests:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
+  node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
+  expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
   name: "[[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -233,7 +233,7 @@
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_response_time_avg"),
-  expr: "rate(synapse_http_server_response_time:total[2m]) / rate(synapse_http_server_response_time:count[2m]) / 1000",
+  expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
   name: "[[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -276,7 +276,7 @@
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_response_ru_utime"),
-  expr: "rate(synapse_http_server_response_ru_utime:total[2m])",
+  expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
   name: "[[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -291,7 +291,7 @@
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
-  expr: "rate(synapse_http_server_response_db_txn_duration:total[2m])",
+  expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
   name: "[[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -306,7 +306,7 @@
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_send_time_avg"),
-  expr: "rate(synapse_http_server_response_time:total{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_time:count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
+  expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
   name: "[[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
contrib/prometheus recording rules (v1 format)

@@ -1,10 +1,10 @@
 synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
 synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

-synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
-synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)
+synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
+synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

-synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)
+synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

 synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
 synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
contrib/prometheus recording rules (v2 format)

@@ -5,19 +5,19 @@ groups:
       expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
     - record: "synapse_federation_transaction_queue_pendingPdus:total"
       expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
-    - record: 'synapse_http_server_requests:method'
+    - record: 'synapse_http_server_request_count:method'
       labels:
         servlet: ""
-      expr: "sum(synapse_http_server_requests) by (method)"
-    - record: 'synapse_http_server_requests:servlet'
+      expr: "sum(synapse_http_server_request_count) by (method)"
+    - record: 'synapse_http_server_request_count:servlet'
       labels:
         method: ""
-      expr: 'sum(synapse_http_server_requests) by (servlet)'
+      expr: 'sum(synapse_http_server_request_count) by (servlet)'

-    - record: 'synapse_http_server_requests:total'
+    - record: 'synapse_http_server_request_count:total'
       labels:
         servlet: ""
-      expr: 'sum(synapse_http_server_requests:by_method) by (servlet)'
+      expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

     - record: 'synapse_cache:hit_ratio_5m'
       expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
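The v2 rules above are an ordinary Prometheus 2.x rule file, so they can be validated before deployment; a sketch, with an illustrative filename:

```
promtool check rules synapse-v2.rules
```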
contrib/systemd/synapse.service (example systemd unit)

@@ -2,6 +2,9 @@
 # (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
 # rather than in a user home directory or similar under virtualenv.

+# **NOTE:** This is an example service file that may change in the future. If you
+# wish to use this please copy rather than symlink it.
+
 [Unit]
 Description=Synapse Matrix homeserver

@@ -12,6 +15,7 @@ Group=synapse
 WorkingDirectory=/var/lib/synapse
 ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
 ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
+# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR

 [Install]
 WantedBy=multi-user.target
35
docker/Dockerfile
Normal file
@@ -0,0 +1,35 @@
FROM docker.io/python:2-alpine3.8

RUN apk add --no-cache --virtual .nacl_deps \
        build-base \
        libffi-dev \
        libjpeg-turbo-dev \
        libressl-dev \
        libxslt-dev \
        linux-headers \
        postgresql-dev \
        su-exec \
        zlib-dev

COPY . /synapse

# A wheel cache may be provided in ./cache for faster build
RUN cd /synapse \
    && pip install --upgrade \
        lxml \
        pip \
        psycopg2 \
        setuptools \
    && mkdir -p /synapse/cache \
    && pip install -f /synapse/cache --upgrade --process-dependency-links . \
    && mv /synapse/docker/start.py /synapse/docker/conf / \
    && rm -rf \
        setup.cfg \
        setup.py \
        synapse

VOLUME ["/data"]

EXPOSE 8008/tcp 8448/tcp

ENTRYPOINT ["/start.py"]
124
docker/README.md
Normal file
@@ -0,0 +1,124 @@
# Synapse Docker

This Docker image will run Synapse as a single process. It does not provide a database
server or a TURN server; you should run these separately.

## Run

We do not currently offer a `latest` image, as this has somewhat undefined semantics.
We instead release only tagged versions so upgrading between releases is entirely
within your control.

### Using docker-compose (easier)

This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.

An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker)
section of the synapse project for examples.

### Without Compose (harder)

If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.

```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```

## Volumes

The image expects a single volume, located at ``/data``, that will hold:

* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.

You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on large but
low-performance HDD storage while other files could be stored on
high-performance endpoints.

In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service YAML
configuration file there. Multiple application services are supported.

## Environment

Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.

Global settings:

* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file

If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.

Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:

* ``SYNAPSE_SERVER_NAME`` (mandatory), the public hostname of this server.
* ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
  you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guests to join this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
  URIs to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.

Shared secrets, which will be initialized to random values if not set:

* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
  registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY``, secret for signing access tokens
  to the server.

Database specific values (will use SQLite if not set):

* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]

Mail server specific values (will not send emails if not set):

* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.

## Build

Build the docker image with the `docker build` command from the root of the synapse repository.

```
docker build -t docker.io/matrixdotorg/synapse . -f docker/Dockerfile
```

The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

You may have a local Python wheel cache available, in which case copy the relevant
packages in the ``cache/`` directory at the root of the project.
219
docker/conf/homeserver.yaml
Normal file
@@ -0,0 +1,219 @@
# vim:ft=yaml

## TLS ##

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

## Server ##

server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0

## Ports ##

listeners:
{% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]  # Federation APIs
        compress: false
{% endif %}

  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false

    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false

## Database ##

{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}

## Performance ##

event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"

## Ratelimiting ##

rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3

## Files ##

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "10M"
max_image_pixels: "32M"
dynamic_thumbnails: false

# List of thumbnails to precalculate when an image is uploaded.
thumbnail_sizes:
  - width: 32
    height: 32
    method: crop
  - width: 96
    height: 96
    method: crop
  - width: 320
    height: 240
    method: scale
  - width: 640
    height: 480
    method: scale
  - width: 800
    height: 600
    method: scale

url_preview_enabled: False
max_spider_size: "10M"

## Captcha ##

{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}

## Turn ##

{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %} - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}

## Registration ##

enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
  - matrix.org
  - vector.im
  - riot.im

## Metrics ##

{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}

## API Configuration ##

room_invite_state_types:
  - "m.room.join_rules"
  - "m.room.canonical_alias"
  - "m.room.avatar"
  - "m.room.name"

{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}

macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False

## Signing Keys ##

signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d"  # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

password_config:
  enabled: true

{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}
29
docker/conf/log.config
Normal file
@@ -0,0 +1,29 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
  handlers: [console]
66
docker/start.py
Executable file
@@ -0,0 +1,66 @@
#!/usr/local/bin/python

import jinja2
import os
import sys
import subprocess
import glob

# Utility functions
convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))

def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)

def generate_secrets(environ, secrets):
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle: value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = os.urandom(32).encode("hex")
                with open(filename, "w") as handle: handle.write(value)
            environ[secret] = value

# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration, missing keys, then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Parse the configuration file
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"): os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]
    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)
63
docs/admin_api/register_api.rst
Normal file
@@ -0,0 +1,63 @@
Shared-Secret Registration
==========================

This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API::

  > GET /_matrix/client/r0/admin/register

  < {"nonce": "thisisanonce"}

Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and an HMAC digest of the content.

As an example::

  > POST /_matrix/client/r0/admin/register
  > {
      "nonce": "thisisanonce",
      "username": "pepper_roni",
      "password": "pizza",
      "admin": true,
      "mac": "mac_digest_here"
    }

  < {
      "access_token": "token_here",
      "user_id": "@pepper_roni:localhost",
      "home_server": "test",
      "device_id": "device_id_here"
    }

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, and either
the string "admin" or "notadmin", each separated by NULs. For an example of
generation in Python::

  import hmac, hashlib

  def generate_mac(nonce, user, password, admin=False):

      mac = hmac.new(
        key=shared_secret,
        digestmod=hashlib.sha1,
      )

      mac.update(nonce.encode('utf8'))
      mac.update(b"\x00")
      mac.update(user.encode('utf8'))
      mac.update(b"\x00")
      mac.update(password.encode('utf8'))
      mac.update(b"\x00")
      mac.update(b"admin" if admin else b"notadmin")

      return mac.hexdigest()
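
The whole flow can be exercised end to end with a short script. The following
is a minimal sketch using the third-party ``requests`` library (not bundled
with Synapse); the server URL, username, password, and ``shared_secret`` are
placeholder values to substitute for your own deployment::

  import hmac, hashlib, json

  import requests

  SERVER = "http://localhost:8008"                   # placeholder
  shared_secret = "your_registration_shared_secret"  # from homeserver.yaml

  def generate_mac(nonce, user, password, admin=False):
      # Same construction as above: HMAC-SHA1 over NUL-separated fields.
      mac = hmac.new(
          key=shared_secret.encode('utf8'),
          digestmod=hashlib.sha1,
      )
      mac.update(nonce.encode('utf8'))
      mac.update(b"\x00")
      mac.update(user.encode('utf8'))
      mac.update(b"\x00")
      mac.update(password.encode('utf8'))
      mac.update(b"\x00")
      mac.update(b"admin" if admin else b"notadmin")
      return mac.hexdigest()

  # Fetch the one-time nonce, then post the registration request.
  url = SERVER + "/_matrix/client/r0/admin/register"
  nonce = requests.get(url).json()["nonce"]
  body = {
      "nonce": nonce,
      "username": "pepper_roni",
      "password": "pizza",
      "admin": True,
      "mac": generate_mac(nonce, "pepper_roni", "pizza", admin=True),
  }
  print(json.dumps(requests.post(url, json=body).json(), indent=2))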
@@ -44,13 +44,26 @@ Deactivate Account

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).
password reset). It can also mark the user as GDPR-erased (stopping their data
from being distributed further, and deleting it entirely if there are no other
references to it).

The API is::

    POST /_matrix/client/r0/admin/deactivate/<user_id>

including an ``access_token`` of a server admin, and an empty request body.
with a body of:

.. code:: json

    {
        "erase": true
    }

including an ``access_token`` of a server admin.

The erase parameter is optional and defaults to 'false'.
An empty body may be passed for backwards compatibility.
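
As a minimal sketch, the call could be made from Python with the third-party
``requests`` library; the server URL and admin token below are placeholders::

  import requests

  SERVER = "http://localhost:8008"         # placeholder
  ADMIN_TOKEN = "admin_access_token_here"  # placeholder: a server admin's token

  resp = requests.post(
      SERVER + "/_matrix/client/r0/admin/deactivate/@user:example.com",
      params={"access_token": ADMIN_TOKEN},
      json={"erase": True},  # optional; omit for the old (non-erasing) behaviour
  )
  print(resp.status_code, resp.json())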

Reset password

@@ -16,7 +16,7 @@

      print("I am a fish %s" %
            "moo")

  and this::
  and this::

      print(
          "I am a fish %s" %
160
docs/consent_tracking.md
Normal file
@@ -0,0 +1,160 @@
Support in Synapse for tracking agreement to server terms and conditions
========================================================================

Synapse 0.30 introduces support for tracking whether users have agreed to the
terms and conditions set by the administrator of a server - and blocking access
to the server until they have.

There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.

Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.

Collecting policy agreement from a user
---------------------------------------

Synapse can be configured to serve the user a simple policy form with an
"accept" button. Clicking "Accept" records the user's acceptance in the
database and shows a success page.

To enable this, first create templates for the policy and success pages.
These should be stored on the local filesystem.

These templates use the [Jinja2](http://jinja.pocoo.org) templating language,
and [docs/privacy_policy_templates](privacy_policy_templates) gives
examples of the sort of thing that can be done.

Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.

The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.

Once the templates are in place, make the following changes to `homeserver.yaml`:

1. Add a `user_consent` section, which should look like:

   ```yaml
   user_consent:
     template_dir: privacy_policy_templates
     version: 1.0
   ```

   `template_dir` points to the directory containing the policy
   templates. `version` defines the version of the policy which will be served
   to the user. In the example above, Synapse will serve
   `privacy_policy_templates/en/1.0.html`.

2. Add a `form_secret` setting at the top level:

   ```yaml
   form_secret: "<unique secret>"
   ```

   This should be set to an arbitrary secret string (try `pwgen -y 30` to
   generate suitable secrets).

   More on what this is used for below.

3. Add `consent` wherever the `client` resource is currently enabled in the
   `listeners` configuration. For example:

   ```yaml
   listeners:
     - port: 8008
       resources:
         - names:
           - client
           - consent
   ```

Finally, ensure that `jinja2` is installed. If you are using a virtualenv, this
should be a matter of `pip install Jinja2`. On debian, try `apt-get install
python-jinja2`.

Once this is complete, and the server has been restarted, try visiting
`https://<server>/_matrix/consent`. If correctly configured, this should give
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.

### Constructing the consent URI

It may be useful to manually construct the "consent URI" for a given user - for
instance, in order to send them an email asking them to consent. To do this,
take the base `https://<server>/_matrix/consent` URL and add the following
query parameters:

* `u`: the user id of the user. This can either be a full MXID
  (`@user:server.com`) or just the localpart (`user`).

* `h`: hex-encoded HMAC-SHA256 of `u` using the `form_secret` as a key. It is
  possible to calculate this on the commandline with something like:

  ```bash
  echo -n '<user>' | openssl sha256 -hmac '<form_secret>'
  ```

This should result in a URI which looks something like:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`. (A Python
equivalent of the digest calculation is sketched below.)
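
The following is a minimal Python sketch of the same calculation; the
`form_secret` and `user` values are placeholders to substitute for your own:

```python
import hmac
import hashlib

form_secret = "<form_secret>"  # must match form_secret in homeserver.yaml
user = "<user>"                # the value passed as the `u` query parameter

# Hex-encoded HMAC-SHA256 of the user id, keyed with form_secret.
h = hmac.new(
    key=form_secret.encode('utf8'),
    msg=user.encode('utf8'),
    digestmod=hashlib.sha256,
).hexdigest()

print("https://<server>/_matrix/consent?u=%s&h=%s" % (user, h))
```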

Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------

It is possible to configure Synapse to send a [server
notice](server_notices.md) to anybody who has not yet agreed to the current
version of the policy. To do so:

* ensure that the consent resource is configured, as in the previous section

* ensure that server notices are configured, as in [server_notices.md](server_notices.md).

* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
  example:

  ```yaml
  user_consent:
    server_notice_content:
      msgtype: m.text
      body: >-
        Please give your consent to the privacy policy at %(consent_uri)s.
  ```

  Synapse automatically replaces the placeholder `%(consent_uri)s` with the
  consent uri for that user.

* ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
  URI that clients use to connect to the server. (It is used to construct
  `consent_uri` in the server notice.)


Blocking users from using the server until they agree to the policy
-------------------------------------------------------------------

Synapse can be configured to block any attempts to join rooms or send messages
until the user has given their agreement to the policy. (Joining the server
notices room is exempted from this).

To enable this, add `block_events_error` under `user_consent`. For example:

```yaml
user_consent:
  block_events_error: >-
    You can't send any messages until you consent to the privacy policy at
    %(consent_uri)s.
```

Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user.

Also ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the error.)
43
docs/manhole.md
Normal file
@@ -0,0 +1,43 @@
Using the synapse manhole
=========================

The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.

To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:

```yaml
listeners:
  - port: 9000
    bind_addresses: ['::1', '127.0.0.1']
    type: manhole
```

(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).

Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.

Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:

```bash
ssh -p9000 matrix@localhost
```

The password is `rabbithole`.

This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.

As a simple example, retrieving an event from the database:

```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```
@@ -1,25 +1,47 @@
How to monitor Synapse metrics using Prometheus
===============================================

1. Install prometheus:
1. Install Prometheus:

   Follow instructions at http://prometheus.io/docs/introduction/install/

2. Enable synapse metrics:
2. Enable Synapse metrics:

   Simply setting a (local) port number will enable it. Pick a port.
   prometheus itself defaults to 9090, so starting just above that for
   locally monitored services seems reasonable. E.g. 9092:
   There are two methods of enabling metrics in Synapse.

   Add to homeserver.yaml::
   The first serves the metrics as a part of the usual web server and can be
   enabled by adding the "metrics" resource to the existing listener as such::

     metrics_port: 9092
     resources:
       - names:
         - client
         - metrics

   Also ensure that ``enable_metrics`` is set to ``True``.
   This provides a simple way of adding metrics to your Synapse installation,
   and serves under ``/_synapse/metrics``. If you do not wish your metrics to
   be publicly exposed, you will need to either filter them out at your load
   balancer, or use the second method.

   Restart synapse.
   The second method runs the metrics server on a different port, in a
   different thread to Synapse. This can make it more resilient under heavy
   load, when metrics might otherwise fail to be retrieved, and makes it
   easier to expose metrics to internal networks only. The served metrics are
   available over HTTP only, and will be available at ``/``.

3. Add a prometheus target for synapse.
   Add a new listener to homeserver.yaml::

     listeners:
       - type: metrics
         port: 9000
         bind_addresses:
           - '0.0.0.0'

   For both options, you will need to ensure that ``enable_metrics`` is set to
   ``True``.

   Restart Synapse.

3. Add a Prometheus target for Synapse.

   It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::

@@ -31,7 +53,50 @@ How to monitor Synapse metrics using Prometheus
   If your prometheus is older than 1.5.2, you will need to replace
   ``static_configs`` in the above with ``target_groups``.

   Restart prometheus.
   Restart Prometheus.


Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------

The duplicated metrics deprecated in Synapse 0.27.0 have been removed.

All time duration-based metrics have been changed to be seconds. This affects:

+----------------------------------+
| msec -> sec metrics              |
+==================================+
| python_gc_time                   |
+----------------------------------+
| python_twisted_reactor_tick_time |
+----------------------------------+
| synapse_storage_query_time       |
+----------------------------------+
| synapse_storage_schedule_time    |
+----------------------------------+
| synapse_storage_transaction_time |
+----------------------------------+

Several metrics have been changed to be histograms, which sort entries into
buckets and allow better analysis. The following metrics are now histograms:

+-------------------------------------------+
| Altered metrics                           |
+===========================================+
| python_gc_time                            |
+-------------------------------------------+
| python_twisted_reactor_pending_calls      |
+-------------------------------------------+
| python_twisted_reactor_tick_time          |
+-------------------------------------------+
| synapse_http_server_response_time_seconds |
+-------------------------------------------+
| synapse_storage_query_time                |
+-------------------------------------------+
| synapse_storage_schedule_time             |
+-------------------------------------------+
| synapse_storage_transaction_time          |
+-------------------------------------------+


Block and response metrics renamed for 0.27.0

@@ -6,16 +6,22 @@ Postgres version 9.4 or later is known to work.

Set up database
===============

The PostgreSQL database used *must* have the correct encoding set, otherwise
Assuming your PostgreSQL database user is called ``postgres``, create a user
``synapse_user`` with::

   su - postgres
   createuser --pwprompt synapse_user

The PostgreSQL database used *must* have the correct encoding set, otherwise it
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::

    CREATE DATABASE synapse
     ENCODING 'UTF8'
     LC_COLLATE='C'
     LC_CTYPE='C'
     template=template0
     OWNER synapse_user;
    CREATE DATABASE synapse
      ENCODING 'UTF8'
      LC_COLLATE='C'
      LC_CTYPE='C'
      template=template0
      OWNER synapse_user;

This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
@@ -46,8 +52,8 @@ As with Debian/Ubuntu, postgres support depends on the postgres python connector

Synapse config
==============

When you are ready to start using PostgreSQL, add the following line to your
config file::
When you are ready to start using PostgreSQL, edit the ``database`` section in
your config file to match the following lines::

    database:
        name: psycopg2
@@ -96,9 +102,12 @@ complete, restart synapse. For instance::

    cp homeserver.db homeserver.db.snapshot
    ./synctl start

Assuming your new config file (as described in the section *Synapse config*)
is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::
Copy the old config file into a new config file::

    cp homeserver.yaml homeserver-postgres.yaml

Edit the database section as described in the section *Synapse config* above
and with the SQLite snapshot located at ``homeserver.db.snapshot`` simply run::

    synapse_port_db --sqlite-database homeserver.db.snapshot \
        --postgres-config homeserver-postgres.yaml
@@ -117,6 +126,11 @@ run::

        --postgres-config homeserver-postgres.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
``homeserver.yaml``) and restart synapse. Synapse should now be running against
PostgreSQL.
database configuration file ``homeserver-postgres.yaml``::

    ./synctl stop
    mv homeserver.yaml homeserver-old-sqlite.yaml
    mv homeserver-postgres.yaml homeserver.yaml
    ./synctl start

Synapse should now be running against PostgreSQL.

23
docs/privacy_policy_templates/en/1.0.html
Normal file
@@ -0,0 +1,23 @@
<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    {% if has_consented %}
    <p>
      Your base already belong to us.
    </p>
    {% else %}
    <p>
      All your base are belong to us.
    </p>
    <form method="post" action="consent">
      <input type="hidden" name="v" value="{{version}}"/>
      <input type="hidden" name="u" value="{{user}}"/>
      <input type="hidden" name="h" value="{{userhmac}}"/>
      <input type="submit" value="Sure thing!"/>
    </form>
    {% endif %}
  </body>
</html>
11
docs/privacy_policy_templates/en/success.html
Normal file
@@ -0,0 +1,11 @@
<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    <p>
      Sweet.
    </p>
  </body>
</html>
74
docs/server_notices.md
Normal file
@@ -0,0 +1,74 @@
Server Notices
==============

'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.

They are used as part of communication of the server policies (see
[consent_tracking.md](consent_tracking.md)), however the intention is that
they may also find a use for features such as "Message of the day".

This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.

User experience
---------------

When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
`homeserver.yaml`). They will be **unable to reject** this invitation -
attempts to do so will receive an error.

Once they accept the invitation, they will see the notice message in the room
history; it will appear to have come from the 'server notices user' (see
below).

The user is prevented from sending any messages in this room by the power
levels.

Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.

Synapse configuration
---------------------

Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
suggested, meaning the notices will come from
`@server:<your_server_name>`. Once the Server Notices user is configured, that
user id becomes a special, privileged user, so administrators should ensure
that **it is not already allocated**.

In order to support server notices, it is necessary to add some configuration
to the `homeserver.yaml` file. In particular, you should add a `server_notices`
section, which should look like this:

```yaml
server_notices:
  system_mxid_localpart: server
  system_mxid_display_name: "Server Notices"
  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
  room_name: "Server Notices"
```

The only compulsory setting is `system_mxid_localpart`, which defines the user
id of the Server Notices user, as above. `room_name` defines the name of the
room which will be created.

`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.

Sending notices
---------------

As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).

In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:

```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```
@@ -173,10 +173,23 @@ endpoints matching the following regular expressions::

    ^/_matrix/federation/v1/backfill/
    ^/_matrix/federation/v1/get_missing_events/
    ^/_matrix/federation/v1/publicRooms
    ^/_matrix/federation/v1/query/
    ^/_matrix/federation/v1/make_join/
    ^/_matrix/federation/v1/make_leave/
    ^/_matrix/federation/v1/send_join/
    ^/_matrix/federation/v1/send_leave/
    ^/_matrix/federation/v1/invite/
    ^/_matrix/federation/v1/query_auth/
    ^/_matrix/federation/v1/event_auth/
    ^/_matrix/federation/v1/exchange_third_party_invite/
    ^/_matrix/federation/v1/send/

The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.

The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
instance.

``synapse.app.federation_sender``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -206,6 +219,10 @@ Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$

``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~

@@ -224,6 +241,14 @@ regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload

If ``use_presence`` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/presence/[^/]+/status

This "stub" presence handler will pass through ``GET`` requests but make the
``PUT`` effectively a no-op.

It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration.

@@ -1,5 +1,7 @@
#! /bin/bash

set -eux

cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox
@@ -14,7 +16,20 @@ fi
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin
$TOX_BIN/pip install setuptools

# cryptography 2.2 requires setuptools >= 18.5.
#
# older versions of virtualenv (?) give us a virtualenv with the same version
# of setuptools as is installed on the system python (and tox runs virtualenv
# under python3, so we get the version of setuptools that is installed on that).
#
# anyway, make sure that we have a recent enough setuptools.
$TOX_BIN/pip install 'setuptools>=18.5'

# we also need a semi-recent version of pip, because old ones fail to install
# the "enum34" dependency of cryptography.
$TOX_BIN/pip install 'pip>=10'

{ python synapse/python_dependencies.py
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install

30
pyproject.toml
Normal file
@@ -0,0 +1,30 @@
[tool.towncrier]
    package = "synapse"
    filename = "CHANGES.md"
    directory = "changelog.d"
    issue_format = "[\\#{issue}](https://github.com/matrix-org/synapse/issues/{issue})"

    [[tool.towncrier.type]]
        directory = "feature"
        name = "Features"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "bugfix"
        name = "Bugfixes"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "doc"
        name = "Improved Documentation"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "removal"
        name = "Deprecations and Removals"
        showcontent = true

    [[tool.towncrier.type]]
        directory = "misc"
        name = "Internal Changes"
        showcontent = true
@@ -18,14 +18,22 @@
from __future__ import print_function

import argparse
from urlparse import urlparse, urlunparse

import nacl.signing
import json
import base64
import requests
import sys

from requests.adapters import HTTPAdapter
import srvlookup
import yaml

# uncomment the following to enable debug logging of http requests
#from httplib import HTTPConnection
#HTTPConnection.debuglevel = 1

def encode_base64(input_bytes):
    """Encode bytes as a base64 string without any padding."""

@@ -113,17 +121,6 @@ def read_signing_keys(stream):
    return keys


def lookup(destination, path):
    if ":" in destination:
        return "https://%s%s" % (destination, path)
    else:
        try:
            srv = srvlookup.lookup("matrix", "tcp", destination)[0]
            return "https://%s:%d%s" % (srv.host, srv.port, path)
        except:
            return "https://%s:%d%s" % (destination, 8448, path)


def request_json(method, origin_name, origin_key, destination, path, content):
    if method is None:
        if content is None:
@@ -152,13 +149,19 @@ def request_json(method, origin_name, origin_key, destination, path, content):
        authorization_headers.append(bytes(header))
        print ("Authorization: %s" % header, file=sys.stderr)

    dest = lookup(destination, path)
    dest = "matrix://%s%s" % (destination, path)
    print ("Requesting %s" % dest, file=sys.stderr)

    result = requests.request(
    s = requests.Session()
    s.mount("matrix://", MatrixConnectionAdapter())

    result = s.request(
        method=method,
        url=dest,
        headers={"Authorization": authorization_headers[0]},
        headers={
            "Host": destination,
            "Authorization": authorization_headers[0]
        },
        verify=False,
        data=content,
    )
@@ -242,5 +245,39 @@ def read_args_from_config(args):
        args.signing_key_path = config['signing_key_path']


class MatrixConnectionAdapter(HTTPAdapter):
    @staticmethod
    def lookup(s):
        if s[-1] == ']':
            # ipv6 literal (with no port)
            return s, 8448

        if ":" in s:
            out = s.rsplit(":",1)
            try:
                port = int(out[1])
            except ValueError:
                raise ValueError("Invalid host:port '%s'" % s)
            return out[0], port

        try:
            srv = srvlookup.lookup("matrix", "tcp", s)[0]
            return srv.host, srv.port
        except:
            return s, 8448

    def get_connection(self, url, proxies=None):
        parsed = urlparse(url)

        (host, port) = self.lookup(parsed.netloc)
        netloc = "%s:%d" % (host, port)
        print("Connecting to %s" % (netloc,), file=sys.stderr)
        url = urlunparse((
            "https", netloc, parsed.path, parsed.params, parsed.query,
            parsed.fragment,
        ))
        return super(MatrixConnectionAdapter, self).get_connection(url, proxies)


if __name__ == "__main__":
    main()

@@ -6,9 +6,19 @@

## Do not run it lightly.

set -e

if [ "$1" == "-h" ] || [ "$1" == "" ]; then
  echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
  echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
  echo "or"
  echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
  exit
fi

ROOMID="$1"

sqlite3 homeserver.db <<EOF
cat <<EOF
DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
DELETE FROM event_edges WHERE room_id = '$ROOMID';
@@ -29,7 +39,7 @@ DELETE FROM state_groups WHERE room_id = '$ROOMID';
DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
DELETE FROM event_search WHERE room_id = '$ROOMID';
DELETE FROM guest_access WHERE room_id = '$ROOMID';
DELETE FROM history_visibility WHERE room_id = '$ROOMID';
DELETE FROM room_tags WHERE room_id = '$ROOMID';

@@ -26,11 +26,37 @@ import yaml


def request_registration(user, password, server_location, shared_secret, admin=False):
    req = urllib2.Request(
        "%s/_matrix/client/r0/admin/register" % (server_location,),
        headers={'Content-Type': 'application/json'}
    )

    try:
        if sys.version_info[:3] >= (2, 7, 9):
            # As of version 2.7.9, urllib2 now checks SSL certs
            import ssl
            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
        else:
            f = urllib2.urlopen(req)
        body = f.read()
        f.close()
        nonce = json.loads(body)["nonce"]
    except urllib2.HTTPError as e:
        print "ERROR! Received %d %s" % (e.code, e.reason,)
        if 400 <= e.code < 500:
            if e.info().type == "application/json":
                resp = json.load(e)
                if "error" in resp:
                    print resp["error"]
        sys.exit(1)

    mac = hmac.new(
        key=shared_secret,
        digestmod=hashlib.sha1,
    )

    mac.update(nonce)
    mac.update("\x00")
    mac.update(user)
    mac.update("\x00")
    mac.update(password)
@@ -40,10 +66,10 @@ def request_registration(user, password, server_location, shared_secret, admin=F
    mac = mac.hexdigest()

    data = {
        "user": user,
        "nonce": nonce,
        "username": user,
        "password": password,
        "mac": mac,
        "type": "org.matrix.login.shared_secret",
        "admin": admin,
    }

@@ -52,7 +78,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
    print "Sending registration request..."

    req = urllib2.Request(
        "%s/_matrix/client/api/v1/register" % (server_location,),
        "%s/_matrix/client/r0/admin/register" % (server_location,),
        data=json.dumps(data),
        headers={'Content-Type': 'application/json'}
    )

@@ -30,6 +30,8 @@ import time
import traceback
import yaml

from six import string_types


logger = logging.getLogger("synapse_port_db")

@@ -574,7 +576,7 @@ class Porter(object):
        def conv(j, col):
            if j in bool_cols:
                return bool(col)
            elif isinstance(col, basestring) and "\0" in col:
            elif isinstance(col, string_types) and "\0" in col:
                logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
                raise BadValueException();
            return col

25
setup.cfg
@@ -14,7 +14,26 @@ ignore =
    pylint.cfg
    tox.ini

[flake8]
[pep8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
ignore = W503
# W503 requires that binary operators be at the end, not start, of lines. Erik
# doesn't like it. E203 is contrary to PEP8.
ignore = W503,E203

[flake8]
# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
# pep8 to do those checks), but not the "max-line-length" setting
max-line-length = 90

[isort]
line_length = 89
not_skip = __init__.py
sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER
default_section=THIRDPARTY
known_first_party = synapse
known_tests=tests
known_compat = mock,six
known_twisted=twisted,OpenSSL
multi_line_output=3
include_trailing_comma=true
combine_as_imports=true

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,4 +17,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.27.2"
__version__ = "0.33.3"

@@ -15,15 +15,19 @@
|
||||
|
||||
import logging
|
||||
|
||||
from six import itervalues
|
||||
|
||||
import pymacaroons
|
||||
from netaddr import IPAddress
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
import synapse.types
|
||||
from synapse import event_auth
|
||||
from synapse.api.constants import EventTypes, Membership, JoinRules
|
||||
from synapse.api.errors import AuthError, Codes
|
||||
from synapse.api.constants import EventTypes, JoinRules, Membership
|
||||
from synapse.api.errors import AuthError, Codes, ResourceLimitError
|
||||
from synapse.types import UserID
|
||||
from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR
|
||||
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
|
||||
from synapse.util.caches.lrucache import LruCache
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
@@ -57,16 +61,17 @@ class Auth(object):
|
||||
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
|
||||
|
||||
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
|
||||
register_cache("token_cache", self.token_cache)
|
||||
register_cache("cache", "token_cache", self.token_cache)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_from_context(self, event, context, do_sig_check=True):
|
||||
prev_state_ids = yield context.get_prev_state_ids(self.store)
|
||||
auth_events_ids = yield self.compute_auth_events(
|
||||
event, context.prev_state_ids, for_verification=True,
|
||||
event, prev_state_ids, for_verification=True,
|
||||
)
|
||||
auth_events = yield self.store.get_events(auth_events_ids)
|
||||
auth_events = {
|
||||
(e.type, e.state_key): e for e in auth_events.values()
|
||||
(e.type, e.state_key): e for e in itervalues(auth_events)
|
||||
}
|
||||
self.check(event, auth_events=auth_events, do_sig_check=do_sig_check)
|
||||
|
||||
@@ -189,7 +194,7 @@ class Auth(object):
                synapse.types.create_requester(user_id, app_service=app_service)
            )

        access_token = get_access_token_from_request(
        access_token = self.get_access_token_from_request(
            request, self.TOKEN_NOT_FOUND_HTTP_STATUS
        )

@@ -204,11 +209,11 @@ class Auth(object):

            ip_addr = self.hs.get_ip_from_request(request)
            user_agent = request.requestHeaders.getRawHeaders(
                "User-Agent",
                default=[""]
            )[0]
                b"User-Agent",
                default=[b""]
            )[0].decode('ascii', 'surrogateescape')
            if user and access_token and ip_addr:
                self.store.insert_client_ip(
                yield self.store.insert_client_ip(
                    user_id=user.to_string(),
                    access_token=access_token,
                    ip=ip_addr,

@@ -235,17 +240,22 @@ class Auth(object):
    @defer.inlineCallbacks
    def _get_appservice_user_id(self, request):
        app_service = self.store.get_app_service_by_token(
            get_access_token_from_request(
            self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
        )
        if app_service is None:
            defer.returnValue((None, None))

        if "user_id" not in request.args:
        if app_service.ip_range_whitelist:
            ip_address = IPAddress(self.hs.get_ip_from_request(request))
            if ip_address not in app_service.ip_range_whitelist:
                defer.returnValue((None, None))

        if b"user_id" not in request.args:
            defer.returnValue((app_service.sender, app_service))

        user_id = request.args["user_id"][0]
        user_id = request.args[b"user_id"][0].decode('utf8')
        if app_service.sender == user_id:
            defer.returnValue((app_service.sender, app_service))

@@ -486,7 +496,7 @@ class Auth(object):
    def _look_up_user_by_access_token(self, token):
        ret = yield self.store.get_user_by_access_token(token)
        if not ret:
            logger.warn("Unrecognised access token - not in store: %s" % (token,))
            logger.warn("Unrecognised access token - not in store.")
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
                errcode=Codes.UNKNOWN_TOKEN
@@ -504,12 +514,12 @@ class Auth(object):

    def get_appservice_by_req(self, request):
        try:
            token = get_access_token_from_request(
            token = self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
            service = self.store.get_app_service_by_token(token)
            if not service:
                logger.warn("Unrecognised appservice access token: %s" % (token,))
                logger.warn("Unrecognised appservice access token.")
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unrecognised access token.",

@@ -535,7 +545,8 @@ class Auth(object):

    @defer.inlineCallbacks
    def add_auth_events(self, builder, context):
        auth_ids = yield self.compute_auth_events(builder, context.prev_state_ids)
        prev_state_ids = yield context.get_prev_state_ids(self.store)
        auth_ids = yield self.compute_auth_events(builder, prev_state_ids)

        auth_events_entries = yield self.store.add_event_hashes(
            auth_ids
@@ -653,7 +664,7 @@ class Auth(object):
            auth_events[(EventTypes.PowerLevels, "")] = power_level_event

        send_level = event_auth.get_send_level(
            EventTypes.Aliases, "", auth_events
            EventTypes.Aliases, "", power_level_event,
        )
        user_level = event_auth.get_user_power_level(user_id, auth_events)

@@ -664,67 +675,134 @@ class Auth(object):
                " edit its room list entry"
            )


def has_access_token(request):
    """Checks if the request has an access_token.

    Returns:
        bool: False if no access_token was given, True otherwise.
    """
    query_params = request.args.get("access_token")
    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
    return bool(query_params) or bool(auth_headers)


def get_access_token_from_request(request, token_not_found_http_status=401):
    """Extracts the access_token from the request.

    Args:
        request: The http request.
        token_not_found_http_status(int): The HTTP status code to set in the
            AuthError if the token isn't found. This is used in some of the
            legacy APIs to change the status code to 403 from the default of
            401 since some of the old clients depended on auth errors returning
            403.
    Returns:
        str: The access_token
    Raises:
        AuthError: If there isn't an access_token in the request.
    """

    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
    query_params = request.args.get("access_token")
    if auth_headers:
        # Try the get the access_token from a "Authorization: Bearer"
        # header
        if query_params is not None:
            raise AuthError(
                token_not_found_http_status,
                "Mixing Authorization headers and access_token query parameters.",
                errcode=Codes.MISSING_TOKEN,
            )
        if len(auth_headers) > 1:
            raise AuthError(
                token_not_found_http_status,
                "Too many Authorization headers.",
                errcode=Codes.MISSING_TOKEN,
            )
        parts = auth_headers[0].split(" ")
        if parts[0] == "Bearer" and len(parts) == 2:
            return parts[1]
        else:
            raise AuthError(
                token_not_found_http_status,
                "Invalid Authorization header.",
                errcode=Codes.MISSING_TOKEN,
            )
    else:
        # Try to get the access_token from the query params.
        if not query_params:
            raise AuthError(
                token_not_found_http_status,
                "Missing access token.",
                errcode=Codes.MISSING_TOKEN
            )

        return query_params[0]

    @staticmethod
    def has_access_token(request):
        """Checks if the request has an access_token.

        Returns:
            bool: False if no access_token was given, True otherwise.
        """
        query_params = request.args.get(b"access_token")
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        return bool(query_params) or bool(auth_headers)

    @staticmethod
    def get_access_token_from_request(request, token_not_found_http_status=401):
        """Extracts the access_token from the request.

        Args:
            request: The http request.
            token_not_found_http_status(int): The HTTP status code to set in the
                AuthError if the token isn't found. This is used in some of the
                legacy APIs to change the status code to 403 from the default of
                401 since some of the old clients depended on auth errors returning
                403.
        Returns:
            unicode: The access_token
        Raises:
            AuthError: If there isn't an access_token in the request.
        """

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
        query_params = request.args.get(b"access_token")
        if auth_headers:
            # Try the get the access_token from a "Authorization: Bearer"
            # header
            if query_params is not None:
                raise AuthError(
                    token_not_found_http_status,
                    "Mixing Authorization headers and access_token query parameters.",
                    errcode=Codes.MISSING_TOKEN,
                )
            if len(auth_headers) > 1:
                raise AuthError(
                    token_not_found_http_status,
                    "Too many Authorization headers.",
                    errcode=Codes.MISSING_TOKEN,
                )
            parts = auth_headers[0].split(b" ")
            if parts[0] == b"Bearer" and len(parts) == 2:
                return parts[1].decode('ascii')
            else:
                raise AuthError(
                    token_not_found_http_status,
                    "Invalid Authorization header.",
                    errcode=Codes.MISSING_TOKEN,
                )
        else:
            # Try to get the access_token from the query params.
            if not query_params:
                raise AuthError(
                    token_not_found_http_status,
                    "Missing access token.",
                    errcode=Codes.MISSING_TOKEN
                )

            return query_params[0].decode('ascii')

    @defer.inlineCallbacks
    def check_in_room_or_world_readable(self, room_id, user_id):
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.

        Returns:
            Deferred[tuple[str, str|None]]: Resolves to the current membership of
                the user in the room and the membership event ID of the user. If
                the user is not in the room and never has been, then
                `(Membership.JOIN, None)` is returned.
        """

        try:
            # check_user_was_in_room will return the most recent membership
            # event for the user if:
            #  * The user is a non-guest user, and was ever in the room
            #  * The user is a guest user, and has joined the room
            # else it will throw.
            member_event = yield self.check_user_was_in_room(room_id, user_id)
            defer.returnValue((member_event.membership, member_event.event_id))
        except AuthError:
            visibility = yield self.state.get_current_state(
                room_id, EventTypes.RoomHistoryVisibility, ""
            )
            if (
                visibility and
                visibility.content["history_visibility"] == "world_readable"
            ):
                defer.returnValue((Membership.JOIN, None))
                return
            raise AuthError(
                403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
            )

    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

        Args:
            user_id(str|None): If present, checks for presence against existing
                MAU cohort
        """
        if self.hs.config.hs_disabled:
            raise ResourceLimitError(
                403, self.hs.config.hs_disabled_message,
                errcode=Codes.RESOURCE_LIMIT_EXCEED,
                admin_uri=self.hs.config.admin_uri,
                limit_type=self.hs.config.hs_disabled_limit_type
            )
        if self.hs.config.limit_usage_by_mau is True:
            # If the user is already part of the MAU cohort
            if user_id:
                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                if timestamp:
                    return
            # Else if there is no room in the MAU bucket, bail
            current_mau = yield self.store.get_monthly_active_count()
            if current_mau >= self.hs.config.max_mau_value:
                raise ResourceLimitError(
                    403, "Monthly Active User Limit Exceeded",
                    admin_uri=self.hs.config.admin_uri,
                    errcode=Codes.RESOURCE_LIMIT_EXCEED,
                    limit_type="monthly_active_user"
                )

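To make the extraction rules above concrete, here is a minimal standalone sketch of the same precedence — an `Authorization: Bearer` header wins, mixing it with an `access_token` query parameter is rejected, and otherwise the query parameter is used. This is an illustrative re-implementation over plain dicts, not Synapse's twisted-request version:

# Minimal sketch of the token-extraction precedence described above.
def extract_access_token(headers, query_params):
    auth_headers = headers.get(b"Authorization")
    tokens = query_params.get(b"access_token")
    if auth_headers:
        if tokens is not None:
            raise ValueError("Mixing Authorization header and access_token")
        if len(auth_headers) > 1:
            raise ValueError("Too many Authorization headers")
        parts = auth_headers[0].split(b" ")
        if parts[0] == b"Bearer" and len(parts) == 2:
            return parts[1].decode("ascii")
        raise ValueError("Invalid Authorization header")
    if not tokens:
        raise ValueError("Missing access token")
    return tokens[0].decode("ascii")

# e.g. extract_access_token({b"Authorization": [b"Bearer syt_abc"]}, {}) == "syt_abc"
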
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +17,9 @@

"""Contains constants from the specification."""

# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2**63 - 1


class Membership(object):

@@ -73,6 +77,8 @@ class EventTypes(object):
    Topic = "m.room.topic"
    Name = "m.room.name"

    ServerACL = "m.room.server_acl"


class RejectedReason(object):
    AUTH_ERROR = "auth_error"
@@ -89,3 +95,11 @@ class RoomCreationPreset(object):
class ThirdPartyEntityKind(object):
    USER = "user"
    LOCATION = "location"


# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = "1"

# vdh-test-version is a placeholder to get room versioning support working and tested
# until we have a working v2.
KNOWN_ROOM_VERSIONS = {"1", "vdh-test-version"}

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -17,7 +18,10 @@

import logging

import simplejson as json
from six import iteritems
from six.moves import http_client

from canonicaljson import json

logger = logging.getLogger(__name__)

@@ -50,6 +54,11 @@ class Codes(object):
    THREEPID_DENIED = "M_THREEPID_DENIED"
    INVALID_USERNAME = "M_INVALID_USERNAME"
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
    CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
    CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
    RESOURCE_LIMIT_EXCEED = "M_RESOURCE_LIMIT_EXCEED"
    UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
    INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"


class CodeMessageException(RuntimeError):

@@ -64,20 +73,6 @@ class CodeMessageException(RuntimeError):
        self.code = code
        self.msg = msg

    def error_dict(self):
        return cs_error(self.msg)


class MatrixCodeMessageException(CodeMessageException):
    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
    """
    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
        super(MatrixCodeMessageException, self).__init__(code, msg)
        self.errcode = errcode


class SynapseError(CodeMessageException):
    """A base exception type for matrix errors which have an errcode and error

@@ -103,38 +98,54 @@ class SynapseError(CodeMessageException):
            self.errcode,
        )

    @classmethod
    def from_http_response_exception(cls, err):
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to
        decide how to map the failure onto a matrix error to send back to the
        client.

        An attempt is made to parse the body of the http response as a matrix
        error. If that succeeds, the errcode and error message from the body
        are used as the errcode and error message in the new synapse error.

        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
        set to the reason code from the HTTP response.

        Args:
            err (HttpResponseException):

        Returns:
            SynapseError:
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
        try:
            j = json.loads(err.response)
        except ValueError:
            j = {}
        errcode = j.get('errcode', Codes.UNKNOWN)
        errmsg = j.get('error', err.msg)

        res = SynapseError(err.code, errmsg, errcode)
        return res


class ProxiedRequestError(SynapseError):
    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
    """
    def __init__(self, code, msg, errcode=Codes.UNKNOWN, additional_fields=None):
        super(ProxiedRequestError, self).__init__(
            code, msg, errcode
        )
        if additional_fields is None:
            self._additional_fields = {}
        else:
            self._additional_fields = dict(additional_fields)

    def error_dict(self):
        return cs_error(
            self.msg,
            self.errcode,
            **self._additional_fields
        )


class ConsentNotGivenError(SynapseError):
    """The error returned to the client when the user has not consented to the
    privacy policy.
    """
    def __init__(self, msg, consent_uri):
        """Constructs a ConsentNotGivenError

        Args:
            msg (str): The human-readable error message
            consent_url (str): The URL where the user can give their consent
        """
        super(ConsentNotGivenError, self).__init__(
            code=http_client.FORBIDDEN,
            msg=msg,
            errcode=Codes.CONSENT_NOT_GIVEN
        )
        self._consent_uri = consent_uri

    def error_dict(self):
        return cs_error(
            self.msg,
            self.errcode,
            consent_uri=self._consent_uri
        )


class RegistrationError(SynapseError):

@@ -220,6 +231,30 @@ class AuthError(SynapseError):
        super(AuthError, self).__init__(*args, **kwargs)


class ResourceLimitError(SynapseError):
    """
    Any error raised when there is a problem with resource usage.
    For instance, the monthly active user limit for the server has been exceeded
    """
    def __init__(
        self, code, msg,
        errcode=Codes.RESOURCE_LIMIT_EXCEED,
        admin_uri=None,
        limit_type=None,
    ):
        self.admin_uri = admin_uri
        self.limit_type = limit_type
        super(ResourceLimitError, self).__init__(code, msg, errcode=errcode)

    def error_dict(self):
        return cs_error(
            self.msg,
            self.errcode,
            admin_uri=self.admin_uri,
            limit_type=self.limit_type
        )


class EventSizeError(SynapseError):
    """An error raised when an event is too big."""

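For context, the error_dict above means a client blocked by a resource limit receives a Matrix error body of roughly the following shape (values here are illustrative):

# Illustrative only: the JSON a client sees when
# ResourceLimitError(403, "Monthly Active User Limit Exceeded", ...) is raised.
example_error_body = {
    "error": "Monthly Active User Limit Exceeded",
    "errcode": "M_RESOURCE_LIMIT_EXCEED",
    "admin_uri": "mailto:admin@example.com",  # hypothetical admin_uri value
    "limit_type": "monthly_active_user",
}
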
@@ -277,12 +312,25 @@ class LimitExceededError(SynapseError):
        )


def cs_exception(exception):
    if isinstance(exception, CodeMessageException):
        return exception.error_dict()
    else:
        logger.error("Unknown exception type: %s", type(exception))
        return {}


class IncompatibleRoomVersionError(SynapseError):
    """A server is trying to join a room whose version it does not support."""

    def __init__(self, room_version):
        super(IncompatibleRoomVersionError, self).__init__(
            code=400,
            msg="Your homeserver does not support the features required to "
            "join this room",
            errcode=Codes.INCOMPATIBLE_ROOM_VERSION,
        )

        self._room_version = room_version

    def error_dict(self):
        return cs_error(
            self.msg,
            self.errcode,
            room_version=self._room_version,
        )


def cs_error(msg, code=Codes.UNKNOWN, **kwargs):

@@ -291,13 +339,13 @@ def cs_error(msg, code=Codes.UNKNOWN, **kwargs):

    Args:
        msg (str): The error message.
        code (int): The error code.
        code (str): The error code.
        kwargs : Additional keys to add to the response.
    Returns:
        A dict representing the error response JSON.
    """
    err = {"error": msg, "errcode": code}
    for key, value in kwargs.iteritems():
    for key, value in iteritems(kwargs):
        err[key] = value
    return err

@@ -341,7 +389,7 @@ class HttpResponseException(CodeMessageException):
    Represents an HTTP-level failure of an outbound request

    Attributes:
        response (str): body of response
        response (bytes): body of response
    """
    def __init__(self, code, msg, response):
        """

@@ -349,7 +397,39 @@ class HttpResponseException(CodeMessageException):
        Args:
            code (int): HTTP status code
            msg (str): reason phrase from HTTP response status line
            response (str): body of response
            response (bytes): body of response
        """
        super(HttpResponseException, self).__init__(code, msg)
        self.response = response

    def to_synapse_error(self):
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to
        decide how to map the failure onto a matrix error to send back to the
        client.

        An attempt is made to parse the body of the http response as a matrix
        error. If that succeeds, the errcode and error message from the body
        are used as the errcode and error message in the new synapse error.

        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
        set to the reason code from the HTTP response.

        Returns:
            SynapseError:
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
        try:
            j = json.loads(self.response)
        except ValueError:
            j = {}

        if not isinstance(j, dict):
            j = {}

        errcode = j.pop('errcode', Codes.UNKNOWN)
        errmsg = j.pop('error', self.msg)

        return ProxiedRequestError(self.code, errmsg, errcode, j)

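A worked illustration of the parsing logic above, using a hypothetical extra field to show how leftover body keys survive the two pops and reach ProxiedRequestError's additional_fields:

import json

body = b'{"errcode": "M_FORBIDDEN", "error": "Denied", "retry_after_ms": 5000}'
j = json.loads(body)
errcode = j.pop('errcode', 'M_UNKNOWN')
errmsg = j.pop('error', 'Forbidden')
# j now holds the leftover fields ({"retry_after_ms": 5000}); the resulting
# ProxiedRequestError keeps them as additional_fields and echoes them back
# to the client via error_dict(). "retry_after_ms" is just an example field.
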
@@ -12,14 +12,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, RoomID
import jsonschema
from canonicaljson import json
from jsonschema import FormatChecker

from twisted.internet import defer

import simplejson as json
import jsonschema
from jsonschema import FormatChecker
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.types import RoomID, UserID

FILTER_SCHEMA = {
    "additionalProperties": False,
@@ -112,7 +113,13 @@ ROOM_EVENT_FILTER_SCHEMA = {
        },
        "contains_url": {
            "type": "boolean"
        }
        },
        "lazy_load_members": {
            "type": "boolean"
        },
        "include_redundant_members": {
            "type": "boolean"
        },
    }
}

@@ -260,6 +267,12 @@ class FilterCollection(object):
    def ephemeral_limit(self):
        return self._room_ephemeral_filter.limit()

    def lazy_load_members(self):
        return self._room_state_filter.lazy_load_members()

    def include_redundant_members(self):
        return self._room_state_filter.include_redundant_members()

    def filter_presence(self, events):
        return self._presence_filter.filter(events)

@@ -411,11 +424,17 @@ class Filter(object):
        return room_ids

    def filter(self, events):
        return filter(self.check, events)
        return list(filter(self.check, events))

    def limit(self):
        return self.filter_json.get("limit", 10)

    def lazy_load_members(self):
        return self.filter_json.get("lazy_load_members", False)

    def include_redundant_members(self):
        return self.filter_json.get("include_redundant_members", False)


def _matches_wildcard(actual_value, filter_value):
    if filter_value.endswith("*"):

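The `list(filter(...))` change above matters because Python 3's `filter` returns a lazy, one-shot iterator rather than a list; the Ratelimiter hunk that follows snapshots `.keys()` into a list for the related reason that Python 3 dict views cannot be mutated while iterated. A quick illustration:

def check(ev):
    return ev["type"] == "m.room.message"

events = [{"type": "m.room.message"}, {"type": "m.room.member"}]

lazy = filter(check, events)         # Python 3: a one-shot iterator, not a list
assert list(lazy) == [{"type": "m.room.message"}]
assert list(lazy) == []              # already exhausted; len(lazy) raises TypeError

eager = list(filter(check, events))  # what the fixed code returns
assert len(eager) == 1
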
@@ -72,7 +72,7 @@ class Ratelimiter(object):
        return allowed, time_allowed

    def prune_message_counts(self, time_now_s):
        for user_id in self.message_counts.keys():
        for user_id in list(self.message_counts.keys()):
            message_count, time_start, msg_rate_hz = (
                self.message_counts[user_id]
            )

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +15,12 @@
# limitations under the License.

"""Contains the URL paths to prefix various aspects of the server with. """
import hmac
from hashlib import sha256

from six.moves.urllib.parse import urlencode

from synapse.config import ConfigError

CLIENT_PREFIX = "/_matrix/client/api/v1"
CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
@@ -25,3 +32,46 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"


class ConsentURIBuilder(object):
    def __init__(self, hs_config):
        """
        Args:
            hs_config (synapse.config.homeserver.HomeServerConfig):
        """
        if hs_config.form_secret is None:
            raise ConfigError(
                "form_secret not set in config",
            )
        if hs_config.public_baseurl is None:
            raise ConfigError(
                "public_baseurl not set in config",
            )

        self._hmac_secret = hs_config.form_secret.encode("utf-8")
        self._public_baseurl = hs_config.public_baseurl

    def build_user_consent_uri(self, user_id):
        """Build a URI which we can give to the user to do their privacy
        policy consent

        Args:
            user_id (str): mxid or username of user

        Returns
            (str) the URI where the user can do consent
        """
        mac = hmac.new(
            key=self._hmac_secret,
            msg=user_id,
            digestmod=sha256,
        ).hexdigest()
        consent_uri = "%s_matrix/consent?%s" % (
            self._public_baseurl,
            urlencode({
                "u": user_id,
                "h": mac
            }),
        )
        return consent_uri

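A minimal sketch of the URI construction performed by build_user_consent_uri, with hypothetical form_secret and public_baseurl values (note the method above passes user_id to hmac.new unencoded, which only works on Python 2; this sketch encodes it explicitly):

import hmac
from hashlib import sha256
from urllib.parse import urlencode

form_secret = "not-a-real-secret"        # hypothetical config values
public_baseurl = "https://example.com/"  # must end with a trailing slash
user_id = "@alice:example.com"

mac = hmac.new(
    key=form_secret.encode("utf-8"),
    msg=user_id.encode("utf-8"),
    digestmod=sha256,
).hexdigest()
consent_uri = "%s_matrix/consent?%s" % (
    public_baseurl,
    urlencode({"u": user_id, "h": mac}),
)
# The consent page recomputes the HMAC from "u" and rejects tampered links.
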
@@ -14,9 +14,11 @@
# limitations under the License.

import sys

from synapse import python_dependencies  # noqa: E402

sys.dont_write_bytecode = True

from synapse import python_dependencies  # noqa: E402

try:
    python_dependencies.check_requirements()

@@ -17,15 +17,18 @@ import gc
import logging
import sys

from daemonize import Daemonize

from twisted.internet import error, reactor

from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit

try:
    import affinity
except Exception:
    affinity = None

from daemonize import Daemonize
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from twisted.internet import error, reactor

logger = logging.getLogger(__name__)

@@ -124,7 +127,20 @@ def quit_with_error(error_string):
    sys.exit(1)


def listen_tcp(bind_addresses, port, factory, backlog=50):
def listen_metrics(bind_addresses, port):
    """
    Start Prometheus metrics server.
    """
    from synapse.metrics import RegistryProxy
    from prometheus_client import start_http_server

    for host in bind_addresses:
        reactor.callInThread(start_http_server, int(port),
                             addr=host, registry=RegistryProxy)
        logger.info("Metrics now reporting on %s:%d", host, port)


def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
    """
    Create a TCP socket for a port and several addresses
    """
@@ -140,7 +156,9 @@ def listen_tcp(bind_addresses, port, factory, backlog=50):
            check_bind_error(e, address, bind_addresses)


def listen_ssl(bind_addresses, port, factory, context_factory, backlog=50):
def listen_ssl(
    bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
    """
    Create an SSL socket for a port and several addresses
    """

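For reference, a hypothetical homeserver.yaml listener entry of type "metrics", shown here as the parsed Python dict that the per-worker dispatch code in the following files matches on:

# Hypothetical parsed listener config routed to _base.listen_metrics.
listener = {
    "type": "metrics",
    "port": 9090,
    "bind_addresses": ["127.0.0.1"],
}
# _base.listen_metrics(listener["bind_addresses"], listener["port"]) then starts
# a prometheus_client HTTP server on each address via reactor.callInThread.
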
@@ -16,6 +16,9 @@
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
@@ -23,6 +26,7 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -32,11 +36,9 @@ from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.appservice")

@@ -62,7 +64,7 @@ class AppserviceServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

        root_resource = create_resource_tree(resources, NoResource())

@@ -74,6 +76,7 @@ class AppserviceServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                )
            )

@@ -93,6 +96,13 @@ class AppserviceServer(HomeServer):
                    globals={"hs": self},
                )
            )
        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                logger.warn(("Metrics listener configured, but "
                             "enable_metrics is not True!"))
            else:
                _base.listen_metrics(listener["bind_addresses"],
                                     listener["port"])
        else:
            logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -107,14 +117,20 @@ class ASReplicationHandler(ReplicationClientHandler):
        super(ASReplicationHandler, self).__init__(hs.get_datastore())
        self.appservice_handler = hs.get_application_service_handler()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
        yield super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)

        if stream_name == "events":
            max_stream_id = self.store.get_room_max_stream_ordering()
            preserve_fn(
                self.appservice_handler.notify_interested_services
            )(max_stream_id)
            run_in_background(self._notify_app_services, max_stream_id)

    @defer.inlineCallbacks
    def _notify_app_services(self, room_stream_id):
        try:
            yield self.appservice_handler.notify_interested_services(room_stream_id)
        except Exception:
            logger.exception("Error notifying application services of event")


def start(config_options):

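The on_rdata/_notify_app_services change above follows a pattern used throughout this release: fire-and-forget work moves into a named helper launched via run_in_background, with its own try/except so failures are logged instead of vanishing. A rough Twisted sketch of that shape (illustrative only; Synapse's real run_in_background also manages logcontexts):

import logging

from twisted.internet import defer

logger = logging.getLogger(__name__)

def run_in_background(f, *args):
    # Start f immediately and log (rather than swallow) any failure.
    d = defer.maybeDeferred(f, *args)
    d.addErrback(lambda failure: logger.error("background task failed: %s", failure))
    return d

@defer.inlineCallbacks
def _notify_app_services(handler, room_stream_id):
    try:
        yield handler.notify_interested_services(room_stream_id)
    except Exception:
        logger.exception("Error notifying application services of event")
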
@@ -16,6 +16,9 @@
import logging
import sys

from twisted.internet import reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
@@ -25,8 +28,10 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.directory import DirectoryStore
@@ -34,29 +39,34 @@ from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.rest.client.v1.room import (
    JoinedRoomMemberListRestServlet,
    PublicRoomListRestServlet,
    RoomEventContextServlet,
    RoomMemberListRestServlet,
    RoomStateRestServlet,
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.client_reader")


class ClientReaderSlavedStore(
    SlavedAccountDataStore,
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    TransactionStore,
    SlavedTransactionStore,
    SlavedClientIpStore,
    BaseSlavedStore,
):
@@ -77,10 +87,16 @@ class ClientReaderServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)

                    PublicRoomListRestServlet(self).register(resource)
                    RoomMemberListRestServlet(self).register(resource)
                    JoinedRoomMemberListRestServlet(self).register(resource)
                    RoomStateRestServlet(self).register(resource)
                    RoomEventContextServlet(self).register(resource)

                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
@@ -98,6 +114,7 @@ class ClientReaderServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                )
            )

@@ -117,7 +134,13 @@ class ClientReaderServer(HomeServer):
                    globals={"hs": self},
                )
            )

        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                logger.warn(("Metrics listener configured, but "
                             "enable_metrics is not True!"))
            else:
                _base.listen_metrics(listener["bind_addresses"],
                                     listener["port"])
        else:
            logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -145,11 +168,13 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -16,6 +16,9 @@
import logging
import sys

from twisted.internet import reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
@@ -25,6 +28,7 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
@@ -39,11 +43,13 @@ from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import (
    RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
    JoinRoomAliasServlet,
    RoomMembershipRestServlet,
    RoomSendEventRestServlet,
    RoomStateEventRestServlet,
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -51,15 +57,13 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.event_creator")


class EventCreatorSlavedStore(
    DirectoryStore,
    TransactionStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
@@ -90,7 +94,7 @@ class EventCreatorServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    RoomSendEventRestServlet(self).register(resource)
@@ -114,6 +118,7 @@ class EventCreatorServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                )
            )

@@ -133,6 +138,13 @@ class EventCreatorServer(HomeServer):
                    globals={"hs": self},
                )
            )
        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                logger.warn(("Metrics listener configured, but "
                             "enable_metrics is not True!"))
            else:
                _base.listen_metrics(listener["bind_addresses"],
                                     listener["port"])
        else:
            logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -162,11 +174,13 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = EventCreatorServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -16,6 +16,9 @@
import logging
import sys

from twisted.internet import reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX
@@ -26,13 +29,20 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -40,18 +50,22 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedAccountDataStore,
    SlavedProfileStore,
    SlavedApplicationServiceStore,
    SlavedPusherStore,
    SlavedPushRuleStore,
    SlavedReceiptsStore,
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    TransactionStore,
    SlavedTransactionStore,
    BaseSlavedStore,
):
    pass
@@ -71,7 +85,7 @@ class FederationReaderServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
@@ -87,6 +101,7 @@ class FederationReaderServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                )
            )

@@ -106,6 +121,13 @@ class FederationReaderServer(HomeServer):
                    globals={"hs": self},
                )
            )
        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                logger.warn(("Metrics listener configured, but "
                             "enable_metrics is not True!"))
            else:
                _base.listen_metrics(listener["bind_addresses"],
                                     listener["port"])
        else:
            logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -133,11 +155,13 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -16,6 +16,9 @@
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base
@@ -25,6 +28,7 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
@@ -32,23 +36,21 @@ from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import Linearizer
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.federation_sender")


class FederationSenderSlaveStore(
    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedDeviceInboxStore, SlavedTransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
):
    def __init__(self, db_conn, hs):
@@ -89,7 +91,7 @@ class FederationSenderServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

        root_resource = create_resource_tree(resources, NoResource())

@@ -101,6 +103,7 @@ class FederationSenderServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                )
            )

@@ -120,6 +123,13 @@ class FederationSenderServer(HomeServer):
                    globals={"hs": self},
                )
            )
        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                logger.warn(("Metrics listener configured, but "
                             "enable_metrics is not True!"))
            else:
                _base.listen_metrics(listener["bind_addresses"],
                                     listener["port"])
        else:
            logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -134,8 +144,9 @@ class FederationSenderReplicationHandler(ReplicationClientHandler):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        super(FederationSenderReplicationHandler, self).on_rdata(
        yield super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)
@@ -176,11 +187,13 @@ def start(config_options):
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
@@ -229,7 +242,7 @@ class FederationSenderHandler(object):
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            preserve_fn(self.update_token)(token)
            run_in_background(self.update_token, token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
@@ -237,19 +250,22 @@ class FederationSenderHandler(object):

    @defer.inlineCallbacks
    def update_token(self, token):
        self.federation_position = token

        # We linearize here to ensure we don't have races updating the token
        with (yield self._fed_position_linearizer.queue(None)):
            if self._last_ack < self.federation_position:
                yield self.store.update_federation_out_pos(
                    "federation", self.federation_position
                )

                # We ACK this token over replication so that the master can drop
                # its in memory queues
                self.replication_client.send_federation_ack(self.federation_position)
                self._last_ack = self.federation_position

        try:
            self.federation_position = token

            # We linearize here to ensure we don't have races updating the token
            with (yield self._fed_position_linearizer.queue(None)):
                if self._last_ack < self.federation_position:
                    yield self.store.update_federation_out_pos(
                        "federation", self.federation_position
                    )

                    # We ACK this token over replication so that the master can drop
                    # its in memory queues
                    self.replication_client.send_federation_ack(self.federation_position)
                    self._last_ack = self.federation_position
        except Exception:
            logger.exception("Error updating federation stream position")


if __name__ == '__main__':

@@ -16,6 +16,9 @@
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.errors import SynapseError
@@ -25,10 +28,9 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import (
    RestServlet, parse_json_object_from_request,
)
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -36,6 +38,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
@@ -43,12 +46,39 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.frontend_proxy")


class PresenceStatusStubServlet(ClientV1RestServlet):
    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")

    def __init__(self, hs):
        super(PresenceStatusStubServlet, self).__init__(hs)
        self.http_client = hs.get_simple_http_client()
        self.auth = hs.get_auth()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_GET(self, request, user_id):
        # Pass through the auth headers, if any, in case the access token
        # is there.
        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
        headers = {
            "Authorization": auth_headers,
        }
        result = yield self.http_client.get_json(
            self.main_uri + request.uri,
            headers=headers,
        )
        defer.returnValue((200, result))

    @defer.inlineCallbacks
    def on_PUT(self, request, user_id):
        yield self.auth.get_user_by_req(request)
        defer.returnValue((200, {}))


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

@@ -90,7 +120,7 @@ class KeyUploadServlet(RestServlet):
        # They're actually trying to upload something, proxy to main synapse.
        # Pass through the auth headers, if any, in case the access token
        # is there.
        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
        headers = {
            "Authorization": auth_headers,
        }
@@ -131,10 +161,16 @@ class FrontendProxyServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)

                    # If presence is disabled, use the stub servlet that does
                    # not allow sending presence
                    if not self.config.use_presence:
                        PresenceStatusStubServlet(self).register(resource)

                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
@@ -152,7 +188,9 @@ class FrontendProxyServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                )
                    self.version_string,
                ),
                reactor=self.get_reactor()
            )

        logger.info("Synapse client reader now listening on port %d", port)
@@ -171,6 +209,13 @@ class FrontendProxyServer(HomeServer):
                    globals={"hs": self},
                )
            )
        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                logger.warn(("Metrics listener configured, but "
                             "enable_metrics is not True!"))
            else:
                _base.listen_metrics(listener["bind_addresses"],
                                     listener["port"])
        else:
            logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -200,11 +245,13 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -18,27 +18,44 @@ import logging
import os
import sys

from six import iteritems

from prometheus_client import Gauge

from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File

import synapse
import synapse.config.logger
from synapse import events
from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
    LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
    STATIC_PREFIX, WEB_CLIENT_PREFIX
from synapse.api.urls import (
    CONTENT_REPO_PREFIX,
    FEDERATION_PREFIX,
    LEGACY_MEDIA_PREFIX,
    MEDIA_PREFIX,
    SERVER_KEY_PREFIX,
    SERVER_KEY_V2_PREFIX,
    STATIC_PREFIX,
    WEB_CLIENT_PREFIX,
)
from synapse.app import _base
from synapse.app._base import quit_with_error, listen_ssl, listen_tcp
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.module_api import ModuleApi
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.metrics import register_memory_metrics
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
    check_requirements
from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX
from synapse.module_api import ModuleApi
from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirements
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.key.v1.server_key_resource import LocalKey

@@ -55,11 +72,6 @@ from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File

logger = logging.getLogger("synapse.app.homeserver")

@@ -140,6 +152,7 @@ class SynapseHomeServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                ),
                self.tls_server_context_factory,
            )

@@ -153,6 +166,7 @@ class SynapseHomeServer(HomeServer):
                    site_tag,
                    listener_config,
                    root_resource,
                    self.version_string,
                )
            )
        logger.info("Synapse now listening on port %d", port)

@@ -182,6 +196,15 @@ class SynapseHomeServer(HomeServer):
                "/_matrix/client/versions": client_resource,
            })

        if name == "consent":
            from synapse.rest.consent.consent_resource import ConsentResource
            consent_resource = ConsentResource(self)
            if compress:
                consent_resource = gz_wrap(consent_resource)
            resources.update({
                "/_matrix/consent": consent_resource,
            })

        if name == "federation":
            resources.update({
                FEDERATION_PREFIX: TransportLayerServer(self),

@@ -219,7 +242,7 @@ class SynapseHomeServer(HomeServer):
            resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

        if name == "metrics" and self.get_config().enable_metrics:
            resources[METRICS_PREFIX] = MetricsResource(self)
            resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

        if name == "replication":
            resources[REPLICATION_PREFIX] = ReplicationRestResource(self)

@@ -252,6 +275,13 @@ class SynapseHomeServer(HomeServer):
                reactor.addSystemEventTrigger(
                    "before", "shutdown", server_listener.stopListening,
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warn(("Metrics listener configured, but "
                                 "enable_metrics is not True!"))
                else:
                    _base.listen_metrics(listener["bind_addresses"],
                                         listener["port"])
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -272,6 +302,11 @@ class SynapseHomeServer(HomeServer):
                quit_with_error(e.message)


# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")


def setup(config_options):
    """
    Args:

@@ -300,14 +335,10 @@ def setup(config_options):
    # check any extra requirements we have now we have a config
    check_requirements(config)

    version_string = "Synapse/" + get_version_string(synapse)

    logger.info("Server hostname: %s", config.server_name)
    logger.info("Server version: %s", version_string)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

@@ -316,8 +347,9 @@ def setup(config_options):
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string=version_string,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

@@ -351,8 +383,6 @@ def setup(config_options):
        hs.get_datastore().start_doing_background_updates()
        hs.get_federation_client().start_get_pdu_cache()

        register_memory_metrics(hs)

    reactor.callWhenRunning(start)

    return hs

@@ -407,6 +437,9 @@ def run(hs):
    # currently either 0 or 1
    stats_process = []

    def start_phone_stats_home():
        return run_as_background_process("phone_stats_home", phone_stats_home)

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")

@@ -423,6 +456,10 @@ def run(hs):
        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
        stats["total_nonbridged_users"] = total_nonbridged_users

        daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
        for name, count in iteritems(daily_user_type_results):
            stats["daily_user_type_" + name] = count

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

@@ -431,7 +468,7 @@ def run(hs):
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        r30_results = yield hs.get_datastore().count_r30_users()
        for name, count in r30_results.iteritems():
        for name, count in iteritems(r30_results):
            stats["r30_users_" + name] = count

        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()

@@ -473,9 +510,42 @@ def run(hs):
            " changes across releases."
        )

    def generate_user_daily_visit_stats():
        return run_as_background_process(
            "generate_user_daily_visits",
            hs.get_datastore().generate_user_daily_visits,
        )

    # Rather than update on per session basis, batch up the requests.
    # If you increase the loop period, the accuracy of user_daily_visits
    # table will decrease
    clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)

    # monthly active user limiting functionality
    clock.looping_call(
        hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60
    )
    hs.get_datastore().reap_monthly_active_users()

    @defer.inlineCallbacks
    def generate_monthly_active_users():
        count = 0
        if hs.config.limit_usage_by_mau:
            count = yield hs.get_datastore().get_monthly_active_count()
        current_mau_gauge.set(float(count))
        max_mau_gauge.set(float(hs.config.max_mau_value))

    hs.get_datastore().initialise_reserved_users(
        hs.config.mau_limits_reserved_threepids
    )
    generate_monthly_active_users()
    if hs.config.limit_usage_by_mau:
        clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings
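
The MAU block above pairs a periodic looping_call with two process-wide Prometheus gauges. A minimal, self-contained sketch of that gauge-update pattern; the prometheus_client API is real, while the count function and gauge names are stand-ins:

# Sketch (not part of the diff): updating Prometheus gauges periodically.
from prometheus_client import Gauge

current_mau_gauge = Gauge("demo_mau_current", "Current MAU")
max_mau_gauge = Gauge("demo_mau_max", "MAU Limit")

def get_monthly_active_count():
    # Stand-in for the datastore query synapse uses
    return 42

def generate_monthly_active_users(limit_usage_by_mau, max_mau_value):
    count = get_monthly_active_count() if limit_usage_by_mau else 0
    current_mau_gauge.set(float(count))
    max_mau_gauge.set(float(max_mau_value))

generate_monthly_active_users(True, 100)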

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)

    # We need to defer this init for the cases that we daemonize
    # otherwise the process ID we get is that of the non-daemon process

@@ -483,7 +553,7 @@ def run(hs):

    # We wait 5 minutes to send the first set of stats as the server can
    # be quite busy the first few minutes
    clock.call_later(5 * 60, phone_stats_home)
    clock.call_later(5 * 60, start_phone_stats_home)
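
Both scheduling sites now call a small wrapper rather than the coroutine itself, so the periodic work runs as a tracked background process and a failure cannot break the schedule. A rough Twisted equivalent of that wrapper pattern; run_as_background_process itself is synapse-internal and not shown in this diff:

# Sketch (not part of the diff): wrapping a periodic task so errors are logged.
import logging

from twisted.internet import defer, task, reactor

logger = logging.getLogger(__name__)

def phone_stats_home():
    # Stand-in for the real stats-reporting coroutine
    return defer.succeed(None)

def start_phone_stats_home():
    # A failing Deferred would stop a LoopingCall; log it instead.
    d = defer.maybeDeferred(phone_stats_home)
    d.addErrback(lambda f: logger.error("phone_stats_home failed: %s", f))
    return d

loop = task.LoopingCall(start_phone_stats_home)
loop.start(3 * 60 * 60, now=False)  # every three hours, like the diff
reactor.callLater(1, reactor.stop)  # just so this demo exits
reactor.run()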

    if hs.config.daemonize and hs.config.print_pidfile:
        print (hs.config.pid_file)

@@ -16,23 +16,25 @@
import logging
import sys

from twisted.internet import reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.api.urls import CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer

@@ -42,8 +44,6 @@ from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.media_repository")

@@ -52,7 +52,7 @@ class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    TransactionStore,
    SlavedTransactionStore,
    BaseSlavedStore,
    MediaRepositoryStore,
):

@@ -73,7 +73,7 @@ class MediaRepositoryServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "media":
                    media_repo = self.get_media_repository_resource()
                    resources.update({

@@ -94,6 +94,7 @@ class MediaRepositoryServer(HomeServer):
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )

@@ -113,6 +114,13 @@ class MediaRepositoryServer(HomeServer):
                        globals={"hs": self},
                    )
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warn(("Metrics listener configured, but "
                                 "enable_metrics is not True!"))
                else:
                    _base.listen_metrics(listener["bind_addresses"],
                                         listener["port"])
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -147,11 +155,13 @@ def start(config_options):
    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -16,6 +16,9 @@
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base

@@ -23,6 +26,7 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.events import SlavedEventStore

@@ -33,11 +37,9 @@ from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.pusher")

@@ -92,7 +94,7 @@ class PusherServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

        root_resource = create_resource_tree(resources, NoResource())

@@ -104,6 +106,7 @@ class PusherServer(HomeServer):
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )

@@ -123,6 +126,13 @@ class PusherServer(HomeServer):
                        globals={"hs": self},
                    )
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warn(("Metrics listener configured, but "
                                 "enable_metrics is not True!"))
                else:
                    _base.listen_metrics(listener["bind_addresses"],
                                         listener["port"])
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -138,26 +148,30 @@ class PusherReplicationHandler(ReplicationClientHandler):

        self.pusher_pool = hs.get_pusherpool()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
        preserve_fn(self.poke_pushers)(stream_name, token, rows)
        yield super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.poke_pushers, stream_name, token, rows)

    @defer.inlineCallbacks
    def poke_pushers(self, stream_name, token, rows):
        if stream_name == "pushers":
            for row in rows:
                if row.deleted:
                    yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                else:
                    yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
        elif stream_name == "events":
            yield self.pusher_pool.on_new_notifications(
                token, token,
            )
        elif stream_name == "receipts":
            yield self.pusher_pool.on_new_receipts(
                token, token, set(row.room_id for row in rows)
            )
        try:
            if stream_name == "pushers":
                for row in rows:
                    if row.deleted:
                        yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                    else:
                        yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
            elif stream_name == "events":
                self.pusher_pool.on_new_notifications(
                    token, token,
                )
            elif stream_name == "receipts":
                self.pusher_pool.on_new_receipts(
                    token, token, set(row.room_id for row in rows)
                )
        except Exception:
            logger.exception("Error poking pushers")

    def stop_pusher(self, user_id, app_id, pushkey):
        key = "%s:%s" % (app_id, pushkey)

@@ -17,6 +17,11 @@ import contextlib
import logging
import sys

from six import iteritems

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse.api.constants import EventTypes
from synapse.app import _base

@@ -26,6 +31,7 @@ from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore

@@ -35,12 +41,12 @@ from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet

@@ -49,14 +55,11 @@ from synapse.rest.client.v2_alpha import sync
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.synchrotron")

@@ -77,9 +80,7 @@ class SynchrotronSlavedStore(
    RoomStore,
    BaseSlavedStore,
):
    did_forget = (
        RoomMemberStore.__dict__["did_forget"]
    )
    pass


UPDATE_SYNCING_USERS_MS = 10 * 1000

@@ -113,7 +114,10 @@ class SynchrotronPresence(object):
        logger.info("Presence process_id is %r", self.process_id)

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)
        if self.hs.config.use_presence:
            self.hs.get_tcp_replication().send_user_sync(
                user_id, is_syncing, last_sync_ms
            )

    def mark_as_coming_online(self, user_id):
        """A user has started syncing. Send a UserSync to the master, unless they

@@ -210,10 +214,13 @@ class SynchrotronPresence(object):
        yield self.notify_from_replication(states, stream_id)

    def get_currently_syncing_users(self):
        return [
            user_id for user_id, count in self.user_to_num_current_syncs.iteritems()
            if count > 0
        ]
        if self.hs.config.use_presence:
            return [
                user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
                if count > 0
            ]
        else:
            return set()


class SynchrotronTyping(object):

@@ -255,7 +262,7 @@ class SynchrotronServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    sync.register_servlets(self, resource)

@@ -279,6 +286,7 @@ class SynchrotronServer(HomeServer):
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )

@@ -298,6 +306,13 @@ class SynchrotronServer(HomeServer):
                        globals={"hs": self},
                    )
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warn(("Metrics listener configured, but "
                                 "enable_metrics is not True!"))
                else:
                    _base.listen_metrics(listener["bind_addresses"],
                                         listener["port"])
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -323,10 +338,10 @@ class SyncReplicationHandler(ReplicationClientHandler):
        self.presence_handler = hs.get_presence_handler()
        self.notifier = hs.get_notifier()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)

        preserve_fn(self.process_and_notify)(stream_name, token, rows)
        yield super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
        run_in_background(self.process_and_notify, stream_name, token, rows)

    def get_streams_to_replicate(self):
        args = super(SyncReplicationHandler, self).get_streams_to_replicate()

@@ -338,55 +353,58 @@ class SyncReplicationHandler(ReplicationClientHandler):

    @defer.inlineCallbacks
    def process_and_notify(self, stream_name, token, rows):
        if stream_name == "events":
            # We shouldn't get multiple rows per token for events stream, so
            # we don't need to optimise this for multiple rows.
            for row in rows:
                event = yield self.store.get_event(row.event_id)
                extra_users = ()
                if event.type == EventTypes.Member:
                    extra_users = (event.state_key,)
                max_token = self.store.get_room_max_stream_ordering()
                self.notifier.on_new_room_event(
                    event, token, max_token, extra_users
                )
        elif stream_name == "push_rules":
            self.notifier.on_new_event(
                "push_rules_key", token, users=[row.user_id for row in rows],
            )
        elif stream_name in ("account_data", "tag_account_data",):
            self.notifier.on_new_event(
                "account_data_key", token, users=[row.user_id for row in rows],
            )
        elif stream_name == "receipts":
            self.notifier.on_new_event(
                "receipt_key", token, rooms=[row.room_id for row in rows],
            )
        elif stream_name == "typing":
            self.typing_handler.process_replication_rows(token, rows)
            self.notifier.on_new_event(
                "typing_key", token, rooms=[row.room_id for row in rows],
            )
        elif stream_name == "to_device":
            entities = [row.entity for row in rows if row.entity.startswith("@")]
            if entities:
                self.notifier.on_new_event(
                    "to_device_key", token, users=entities,
                )
        elif stream_name == "device_lists":
            all_room_ids = set()
            for row in rows:
                room_ids = yield self.store.get_rooms_for_user(row.user_id)
                all_room_ids.update(room_ids)
            self.notifier.on_new_event(
                "device_list_key", token, rooms=all_room_ids,
            )
        elif stream_name == "presence":
            yield self.presence_handler.process_replication_rows(token, rows)
        elif stream_name == "groups":
            self.notifier.on_new_event(
                "groups_key", token, users=[row.user_id for row in rows],
            )
        try:
            if stream_name == "events":
                # We shouldn't get multiple rows per token for events stream, so
                # we don't need to optimise this for multiple rows.
                for row in rows:
                    event = yield self.store.get_event(row.event_id)
                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key,)
                    max_token = self.store.get_room_max_stream_ordering()
                    self.notifier.on_new_room_event(
                        event, token, max_token, extra_users
                    )
            elif stream_name == "push_rules":
                self.notifier.on_new_event(
                    "push_rules_key", token, users=[row.user_id for row in rows],
                )
            elif stream_name in ("account_data", "tag_account_data",):
                self.notifier.on_new_event(
                    "account_data_key", token, users=[row.user_id for row in rows],
                )
            elif stream_name == "receipts":
                self.notifier.on_new_event(
                    "receipt_key", token, rooms=[row.room_id for row in rows],
                )
            elif stream_name == "typing":
                self.typing_handler.process_replication_rows(token, rows)
                self.notifier.on_new_event(
                    "typing_key", token, rooms=[row.room_id for row in rows],
                )
            elif stream_name == "to_device":
                entities = [row.entity for row in rows if row.entity.startswith("@")]
                if entities:
                    self.notifier.on_new_event(
                        "to_device_key", token, users=entities,
                    )
            elif stream_name == "device_lists":
                all_room_ids = set()
                for row in rows:
                    room_ids = yield self.store.get_rooms_for_user(row.user_id)
                    all_room_ids.update(room_ids)
                self.notifier.on_new_event(
                    "device_list_key", token, rooms=all_room_ids,
                )
            elif stream_name == "presence":
                yield self.presence_handler.process_replication_rows(token, rows)
            elif stream_name == "groups":
                self.notifier.on_new_event(
                    "groups_key", token, users=[row.user_id for row in rows],
                )
        except Exception:
            logger.exception("Error processing replication")


def start(config_options):

@@ -16,16 +16,19 @@

import argparse
import collections
import errno
import glob
import os
import os.path
import signal
import subprocess
import sys
import yaml
import errno
import time

from six import iteritems

import yaml

SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]

GREEN = "\x1b[1;32m"

@@ -171,6 +174,10 @@ def main():
    if cache_factor:
        os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)

    cache_factors = config.get("synctl_cache_factors", {})
    for cache_name, factor in iteritems(cache_factors):
        os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)

    worker_configfiles = []
    if options.worker:
        start_stop_synapse = False

@@ -17,6 +17,9 @@
import logging
import sys

from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

import synapse
from synapse import events
from synapse.app import _base

@@ -26,6 +29,7 @@ from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore

@@ -39,11 +43,9 @@ from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.user_dir")

@@ -105,7 +107,7 @@ class UserDirectoryServer(HomeServer):
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    user_directory.register_servlets(self, resource)

@@ -126,6 +128,7 @@ class UserDirectoryServer(HomeServer):
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )

@@ -145,6 +148,13 @@ class UserDirectoryServer(HomeServer):
                        globals={"hs": self},
                    )
                )
            elif listener["type"] == "metrics":
                if not self.get_config().enable_metrics:
                    logger.warn(("Metrics listener configured, but "
                                 "enable_metrics is not True!"))
                else:
                    _base.listen_metrics(listener["bind_addresses"],
                                         listener["port"])
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -159,12 +169,20 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
        self.user_directory = hs.get_user_directory_handler()

    @defer.inlineCallbacks
    def on_rdata(self, stream_name, token, rows):
        super(UserDirectoryReplicationHandler, self).on_rdata(
        yield super(UserDirectoryReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        if stream_name == "current_state_deltas":
            preserve_fn(self.user_directory.notify_new_event)()
            run_in_background(self._notify_directory)

    @defer.inlineCallbacks
    def _notify_directory(self):
        try:
            yield self.user_directory.notify_new_event()
        except Exception:
            logger.exception("Error notifying user directory of state update")


def start(config_options):

@@ -197,11 +215,13 @@ def start(config_options):
    config.update_user_directory = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    ps = UserDirectoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,

@@ -12,14 +12,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.types import GroupID, get_domain_from_id
import logging
import re

from six import string_types

from twisted.internet import defer

import logging
import re
from synapse.api.constants import EventTypes
from synapse.types import GroupID, get_domain_from_id
from synapse.util.caches.descriptors import cachedInlineCallbacks

logger = logging.getLogger(__name__)

@@ -83,7 +85,8 @@ class ApplicationService(object):
    NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

    def __init__(self, token, hostname, url=None, namespaces=None, hs_token=None,
                 sender=None, id=None, protocols=None, rate_limited=True):
                 sender=None, id=None, protocols=None, rate_limited=True,
                 ip_range_whitelist=None):
        self.token = token
        self.url = url
        self.hs_token = hs_token

@@ -91,6 +94,7 @@ class ApplicationService(object):
        self.server_name = hostname
        self.namespaces = self._check_namespaces(namespaces)
        self.id = id
        self.ip_range_whitelist = ip_range_whitelist

        if "|" in self.id:
            raise Exception("application service ID cannot contain '|' character")

@@ -146,7 +150,7 @@ class ApplicationService(object):
            )

        regex = regex_obj.get("regex")
        if isinstance(regex, basestring):
        if isinstance(regex, string_types):
            regex_obj["regex"] = re.compile(regex)  # Pre-compile regex
        else:
            raise ValueError(

@@ -290,4 +294,8 @@ class ApplicationService(object):
        return self.rate_limited

    def __str__(self):
        return "ApplicationService: %s" % (self.__dict__,)
        # copy dictionary and redact token fields so they don't get logged
        dict_copy = self.__dict__.copy()
        dict_copy["token"] = "<redacted>"
        dict_copy["hs_token"] = "<redacted>"
        return "ApplicationService: %s" % (dict_copy,)

@@ -12,21 +12,39 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib

from prometheus_client import Counter

from twisted.internet import defer

from synapse.api.constants import ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.events.utils import serialize_event
from synapse.util.logcontext import preserve_fn, make_deferred_yieldable
from synapse.util.caches.response_cache import ResponseCache
from synapse.http.client import SimpleHttpClient
from synapse.types import ThirdPartyInstanceID

import logging
import urllib
from synapse.util.caches.response_cache import ResponseCache

logger = logging.getLogger(__name__)

sent_transactions_counter = Counter(
    "synapse_appservice_api_sent_transactions",
    "Number of /transactions/ requests sent",
    ["service"]
)

failed_transactions_counter = Counter(
    "synapse_appservice_api_failed_transactions",
    "Number of /transactions/ requests that failed to send",
    ["service"]
)

sent_events_counter = Counter(
    "synapse_appservice_api_sent_events",
    "Number of events sent to the AS",
    ["service"]
)

HOUR_IN_MS = 60 * 60 * 1000

@@ -73,7 +91,8 @@ class ApplicationServiceApi(SimpleHttpClient):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
        self.protocol_meta_cache = ResponseCache(hs, "as_protocol_meta",
                                                 timeout_ms=HOUR_IN_MS)

    @defer.inlineCallbacks
    def query_user(self, service, user_id):

@@ -193,12 +212,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            defer.returnValue(None)

        key = (service.id, protocol)
        result = self.protocol_meta_cache.get(key)
        if not result:
            result = self.protocol_meta_cache.set(
                key, preserve_fn(_get)()
            )
        return make_deferred_yieldable(result)
        return self.protocol_meta_cache.wrap(key, _get)
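
ResponseCache.wrap collapses the old get/set/yield dance into one call: the first lookup for a key does the work, and later callers reuse the stored result instead of issuing a second request. A toy single-threaded sketch of that deduplication idea, not synapse's implementation:

# Sketch (not part of the diff): a minimal wrap()-style response cache.
class ResponseCache(object):
    """Caches the result of an expensive call, keyed by arbitrary tuples."""

    def __init__(self):
        self._results = {}

    def wrap(self, key, callback, *args):
        # First caller computes and stores; later callers reuse the result.
        if key not in self._results:
            self._results[key] = callback(*args)
        return self._results[key]

cache = ResponseCache()

def _get(protocol):
    print("fetching metadata for", protocol)  # runs only once per key
    return {"protocol": protocol}

cache.wrap(("svc1", "irc"), _get, "irc")
cache.wrap(("svc1", "irc"), _get, "irc")  # served from cache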

    @defer.inlineCallbacks
    def push_bulk(self, service, events, txn_id=None):

@@ -224,12 +238,15 @@ class ApplicationServiceApi(SimpleHttpClient):
                args={
                    "access_token": service.hs_token
                })
            sent_transactions_counter.labels(service.id).inc()
            sent_events_counter.labels(service.id).inc(len(events))
            defer.returnValue(True)
            return
        except CodeMessageException as e:
            logger.warning("push_bulk to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("push_bulk to %s threw exception %s", uri, ex)
        failed_transactions_counter.labels(service.id).inc()
        defer.returnValue(False)

    def _serialize(self, events):

@@ -48,14 +48,14 @@ UP & quit +---------- YES SUCCESS
This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
import logging

from twisted.internet import defer

from synapse.appservice import ApplicationServiceState
from synapse.util.logcontext import preserve_fn
from synapse.util.logcontext import run_in_background
from synapse.util.metrics import Measure

import logging

logger = logging.getLogger(__name__)


@@ -106,7 +106,7 @@ class _ServiceQueuer(object):
    def enqueue(self, service, event):
        # if this service isn't being sent something
        self.queued_events.setdefault(service.id, []).append(event)
        preserve_fn(self._send_request)(service)
        run_in_background(self._send_request, service)

    @defer.inlineCallbacks
    def _send_request(self, service):

@@ -152,10 +152,10 @@ class _TransactionController(object):
            if sent:
                yield txn.complete(self.store)
            else:
                preserve_fn(self._start_recoverer)(service)
        except Exception as e:
            logger.exception(e)
            preserve_fn(self._start_recoverer)(service)
                run_in_background(self._start_recoverer, service)
        except Exception:
            logger.exception("Error creating appservice transaction")
            run_in_background(self._start_recoverer, service)

    @defer.inlineCallbacks
    def on_recovered(self, recoverer):

@@ -176,17 +176,20 @@ class _TransactionController(object):

    @defer.inlineCallbacks
    def _start_recoverer(self, service):
        yield self.store.set_appservice_state(
            service,
            ApplicationServiceState.DOWN
        )
        logger.info(
            "Application service falling behind. Starting recoverer. AS ID %s",
            service.id
        )
        recoverer = self.recoverer_fn(service, self.on_recovered)
        self.add_recoverers([recoverer])
        recoverer.recover()
        try:
            yield self.store.set_appservice_state(
                service,
                ApplicationServiceState.DOWN
            )
            logger.info(
                "Application service falling behind. Starting recoverer. AS ID %s",
                service.id
            )
            recoverer = self.recoverer_fn(service, self.on_recovered)
            self.add_recoverers([recoverer])
            recoverer.recover()
        except Exception:
            logger.exception("Error starting AS recoverer")

    @defer.inlineCallbacks
    def _is_service_up(self, service):

@@ -12,3 +12,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import ConfigError

# export ConfigError if somebody does import *
# this is largely a fudge to stop PEP8 moaning about the import
__all__ = ["ConfigError"]

@@ -16,9 +16,12 @@
import argparse
import errno
import os
import yaml
from textwrap import dedent

from six import integer_types

import yaml


class ConfigError(Exception):
    pass

@@ -49,7 +52,7 @@ Missing mandatory `server_name` config option.
class Config(object):
    @staticmethod
    def parse_size(value):
        if isinstance(value, int) or isinstance(value, long):
        if isinstance(value, integer_types):
            return value
        sizes = {"K": 1024, "M": 1024 * 1024}
        size = 1
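
With the suffix table above, the rest of parse_size (not included in this hunk) just peels a K/M suffix off the string. A runnable sketch of how such a parser typically completes, so the integer_types change has context; this is an inferred completion, not the verbatim synapse code:

# Sketch (not part of the diff): completing the size parser.
def parse_size(value):
    # Ints are assumed to already be a byte count.
    if isinstance(value, int):
        return value
    sizes = {"K": 1024, "M": 1024 * 1024}
    size = 1
    suffix = value[-1:]
    if suffix in sizes:
        value = value[:-1]
        size = sizes[suffix]
    return int(value) * size

assert parse_size("10M") == 10 * 1024 * 1024
assert parse_size("512K") == 512 * 1024
assert parse_size(4096) == 4096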

@@ -61,7 +64,7 @@ class Config(object):

    @staticmethod
    def parse_duration(value):
        if isinstance(value, int) or isinstance(value, long):
        if isinstance(value, integer_types):
            return value
        second = 1000
        minute = 60 * second

@@ -279,31 +282,31 @@ class Config(object):
            )
            if not cls.path_exists(config_dir_path):
                os.makedirs(config_dir_path)
            with open(config_path, "wb") as config_file:
                config_bytes, config = obj.generate_config(
            with open(config_path, "w") as config_file:
                config_str, config = obj.generate_config(
                    config_dir_path=config_dir_path,
                    server_name=server_name,
                    report_stats=(config_args.report_stats == "yes"),
                    is_generating_file=True
                )
                obj.invoke_all("generate_files", config)
                config_file.write(config_bytes)
            print (
                config_file.write(config_str)
            print((
                "A config file has been generated in %r for server name"
                " %r with corresponding SSL keys and self-signed"
                " certificates. Please review this file and customise it"
                " to your needs."
            ) % (config_path, server_name)
            print (
            ) % (config_path, server_name))
            print(
                "If this server name is incorrect, you will need to"
                " regenerate the SSL certificates"
            )
            return
        else:
            print (
            print((
                "Config file %r already exists. Generating any missing key"
                " files."
            ) % (config_path,)
            ) % (config_path,))
            generate_keys = True

        parser = argparse.ArgumentParser(
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

from synapse.api.constants import EventTypes

from ._base import Config


class ApiConfig(Config):


@@ -12,14 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError
import logging

from six import string_types
from six.moves.urllib import parse as urlparse

import yaml
from netaddr import IPSet

from synapse.appservice import ApplicationService
from synapse.types import UserID

import urllib
import yaml
import logging
from ._base import Config, ConfigError

logger = logging.getLogger(__name__)

@@ -89,21 +93,21 @@ def _load_appservice(hostname, as_info, config_filename):
        "id", "as_token", "hs_token", "sender_localpart"
    ]
    for field in required_string_fields:
        if not isinstance(as_info.get(field), basestring):
        if not isinstance(as_info.get(field), string_types):
            raise KeyError("Required string field: '%s' (%s)" % (
                field, config_filename,
            ))

    # 'url' must either be a string or explicitly null, not missing
    # to avoid accidentally turning off push for ASes.
    if (not isinstance(as_info.get("url"), basestring) and
    if (not isinstance(as_info.get("url"), string_types) and
            as_info.get("url", "") is not None):
        raise KeyError(
            "Required string field or explicit null: 'url' (%s)" % (config_filename,)
        )

    localpart = as_info["sender_localpart"]
    if urllib.quote(localpart) != localpart:
    if urlparse.quote(localpart) != localpart:
        raise ValueError(
            "sender_localpart needs characters which are not URL encoded."
        )

@@ -128,7 +132,7 @@ def _load_appservice(hostname, as_info, config_filename):
                "Expected namespace entry in %s to be an object,"
                " but got %s", ns, regex_obj
            )
            if not isinstance(regex_obj.get("regex"), basestring):
            if not isinstance(regex_obj.get("regex"), string_types):
                raise ValueError(
                    "Missing/bad type 'regex' key in %s", regex_obj
                )

@@ -152,6 +156,13 @@ def _load_appservice(hostname, as_info, config_filename):
        " will not receive events or queries.",
        config_filename,
    )

    ip_range_whitelist = None
    if as_info.get('ip_range_whitelist'):
        ip_range_whitelist = IPSet(
            as_info.get('ip_range_whitelist')
        )

    return ApplicationService(
        token=as_info["as_token"],
        hostname=hostname,

@@ -161,5 +172,6 @@ def _load_appservice(hostname, as_info, config_filename):
        sender=user_id,
        id=as_info["id"],
        protocols=protocols,
        rate_limited=rate_limited
        rate_limited=rate_limited,
        ip_range_whitelist=ip_range_whitelist,
    )

synapse/config/consent_config.py (new file, 88 lines)
@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

DEFAULT_CONFIG = """\
# User Consent configuration
#
# for detailed instructions, see
# https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
#
# Parts of this section are required if enabling the 'consent' resource under
# 'listeners', in particular 'template_dir' and 'version'.
#
# 'template_dir' gives the location of the templates for the HTML forms.
# This directory should contain one subdirectory per language (eg, 'en', 'fr'),
# and each language directory should contain the policy document (named as
# '<version>.html') and a success page (success.html).
#
# 'version' specifies the 'current' version of the policy document. It defines
# the version to be served by the consent resource if there is no 'v'
# parameter.
#
# 'server_notice_content', if enabled, will send a user a "Server Notice"
# asking them to consent to the privacy policy. The 'server_notices' section
# must also be configured for this to work. Notices will *not* be sent to
# guest users unless 'send_server_notice_to_guests' is set to true.
#
# 'block_events_error', if set, will block any attempts to send events
# until the user consents to the privacy policy. The value of the setting is
# used as the text of the error.
#
# user_consent:
#   template_dir: res/templates/privacy
#   version: 1.0
#   server_notice_content:
#     msgtype: m.text
#     body: >-
#       To continue using this homeserver you must review and agree to the
#       terms and conditions at %(consent_uri)s
#   send_server_notice_to_guests: True
#   block_events_error: >-
#     To continue using this homeserver you must review and agree to the
#     terms and conditions at %(consent_uri)s
#
"""


class ConsentConfig(Config):
    def __init__(self):
        super(ConsentConfig, self).__init__()

        self.user_consent_version = None
        self.user_consent_template_dir = None
        self.user_consent_server_notice_content = None
        self.user_consent_server_notice_to_guests = False
        self.block_events_without_consent_error = None

    def read_config(self, config):
        consent_config = config.get("user_consent")
        if consent_config is None:
            return
        self.user_consent_version = str(consent_config["version"])
        self.user_consent_template_dir = consent_config["template_dir"]
        self.user_consent_server_notice_content = consent_config.get(
            "server_notice_content",
        )
        self.block_events_without_consent_error = consent_config.get(
            "block_events_error",
        )
        self.user_consent_server_notice_to_guests = bool(consent_config.get(
            "send_server_notice_to_guests", False,
        ))

    def default_config(self, **kwargs):
        return DEFAULT_CONFIG

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -12,31 +13,32 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .tls import TlsConfig
from .server import ServerConfig
from .logger import LoggingConfig
from .database import DatabaseConfig
from .ratelimiting import RatelimitConfig
from .repository import ContentRepositoryConfig
from .captcha import CaptchaConfig
from .voip import VoipConfig
from .registration import RegistrationConfig
from .metrics import MetricsConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .key import KeyConfig
from .saml2 import SAML2Config
from .captcha import CaptchaConfig
from .cas import CasConfig
from .password import PasswordConfig
from .jwt import JWTConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .consent_config import ConsentConfig
from .database import DatabaseConfig
from .emailconfig import EmailConfig
from .workers import WorkerConfig
from .push import PushConfig
from .spam_checker import SpamCheckerConfig
from .groups import GroupsConfig
from .jwt import JWTConfig
from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig
from .password import PasswordConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .push import PushConfig
from .ratelimiting import RatelimitConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
from .saml2 import SAML2Config
from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .tls import TlsConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
from .workers import WorkerConfig


class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,

@@ -45,12 +47,15 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                       JWTConfig, PasswordConfig, EmailConfig,
                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,):
                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
                       ConsentConfig,
                       ServerNoticesConfig,
                       ):
    pass


if __name__ == '__main__':
    import sys
    sys.stdout.write(
        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0]
        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2], True)[0]
    )
@@ -15,7 +15,6 @@

from ._base import Config, ConfigError


MISSING_JWT = (
    """Missing jwt library. This is required for jwt login.

@@ -13,21 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError

from synapse.util.stringutils import random_string
from signedjson.key import (
    generate_signing_key, is_signing_algorithm_supported,
    decode_signing_key_base64, decode_verify_key_bytes,
    read_signing_keys, write_signing_keys, NACL_ED25519
)
from unpaddedbase64 import decode_base64
from synapse.util.stringutils import random_string_with_symbols

import os
import hashlib
import logging
import os

from signedjson.key import (
    NACL_ED25519,
    decode_signing_key_base64,
    decode_verify_key_bytes,
    generate_signing_key,
    is_signing_algorithm_supported,
    read_signing_keys,
    write_signing_keys,
)
from unpaddedbase64 import decode_base64

from synapse.util.stringutils import random_string, random_string_with_symbols

from ._base import Config, ConfigError

logger = logging.getLogger(__name__)

@@ -59,14 +62,20 @@ class KeyConfig(Config):

        self.expire_access_token = config.get("expire_access_token", False)

        # a secret which is used to calculate HMACs for form values, to stop
        # falsification of values
        self.form_secret = config.get("form_secret", None)

    def default_config(self, config_dir_path, server_name, is_generating_file=False,
                       **kwargs):
        base_key_name = os.path.join(config_dir_path, server_name)

        if is_generating_file:
            macaroon_secret_key = random_string_with_symbols(50)
            form_secret = '"%s"' % random_string_with_symbols(50)
        else:
            macaroon_secret_key = None
            form_secret = 'null'

        return """\
        macaroon_secret_key: "%(macaroon_secret_key)s"

@@ -74,6 +83,10 @@ class KeyConfig(Config):
        # Used to enable access token expiration.
        expire_access_token: False

        # a secret which is used to calculate HMACs for form values, to stop
        # falsification of values
        form_secret: %(form_secret)s

        ## Signing Keys ##

        # Path to the signing key to sign messages with
|
||||
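The key.py hunk above introduces form_secret alongside macaroon_secret_key and only mints a value when a fresh config file is being generated. A standalone sketch of that branch, with a local stand-in for synapse.util.stringutils.random_string_with_symbols:

    import random
    import string

    def random_string_with_symbols(n):
        # Local stand-in for synapse's helper; the exact alphabet is an
        # assumption, not the library's.
        alphabet = string.ascii_letters + string.digits + "!#$%&*+-_"
        rng = random.SystemRandom()
        return "".join(rng.choice(alphabet) for _ in range(n))

    def default_form_secret(is_generating_file=False):
        # Same branch as the diff: a secret is only minted when a new config
        # file is being written; otherwise the YAML default is null.
        if is_generating_file:
            return '"%s"' % random_string_with_symbols(50)
        return 'null'

    print("form_secret: %s" % default_form_secret(True))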
synapse/config/logger.py
@@ -12,17 +12,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from synapse.util.logcontext import LoggingContextFilter
from twisted.logger import globalLogBeginner, STDLibLogObserver
import logging
import logging.config
import yaml
from string import Template
import os
import signal
import sys
from string import Template

import yaml

from twisted.logger import STDLibLogObserver, globalLogBeginner

import synapse
from synapse.util.logcontext import LoggingContextFilter
from synapse.util.versionstring import get_version_string

from ._base import Config

DEFAULT_LOG_CONFIG = Template("""
version: 1
@@ -117,7 +122,7 @@ class LoggingConfig(Config):
log_config = config.get("log_config")
if log_config and not os.path.exists(log_config):
log_file = self.abspath("homeserver.log")
with open(log_config, "wb") as log_config_file:
with open(log_config, "w") as log_config_file:
log_config_file.write(
DEFAULT_LOG_CONFIG.substitute(log_file=log_file)
)
@@ -163,7 +168,8 @@ def setup_logging(config, use_worker_options=False):
if log_file:
# TODO: Customisable file size / backup count
handler = logging.handlers.RotatingFileHandler(
log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
encoding='utf8'
)

def sighup(signum, stack):
@@ -188,9 +194,8 @@ def setup_logging(config, use_worker_options=False):

def sighup(signum, stack):
# it might be better to use a file watcher or something for this.
logging.info("Reloading log config from %s due to SIGHUP",
log_config)
load_log_config()
logging.info("Reloaded log config from %s due to SIGHUP", log_config)

load_log_config()

@@ -202,6 +207,15 @@ def setup_logging(config, use_worker_options=False):
if getattr(signal, "SIGHUP"):
signal.signal(signal.SIGHUP, sighup)

# make sure that the first thing we log is a thing we can grep backwards
# for
logging.warn("***** STARTING SERVER *****")
logging.warn(
"Server %s version %s",
sys.argv[0], get_version_string(synapse),
)
logging.info("Server hostname: %s", config.server_name)

# It's critical to point twisted's internal logging somewhere, otherwise it
# stacks up and leaks up to 64K objects;
# see: https://twistedmatrix.com/trac/ticket/8164

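Two of the logging changes above matter operationally: the default log config is now written in text mode rather than binary, and the rotating file handler gains an explicit utf8 encoding so a non-ASCII log message cannot blow up the handler under Python 2. An equivalent standalone handler (the filename here is illustrative; synapse takes it from its log config):

    import logging
    import logging.handlers

    handler = logging.handlers.RotatingFileHandler(
        "homeserver.log", maxBytes=1000 * 1000 * 100, backupCount=3,
        encoding="utf8",  # the addition made by the diff above
    )
    logger = logging.getLogger("demo")
    logger.addHandler(handler)
    logger.warning("Server %s version %s", "synapse", "0.33.3")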
synapse/config/password_auth_providers.py
@@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

from synapse.util.module_loader import load_module

from ._base import Config

LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider'

synapse/config/registration.py
@@ -13,11 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from distutils.util import strtobool

from synapse.util.stringutils import random_string_with_symbols

from distutils.util import strtobool
from ._base import Config


class RegistrationConfig(Config):

synapse/config/repository.py
@@ -13,11 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError
from collections import namedtuple

from synapse.util.module_loader import load_module

from ._base import Config, ConfigError

MISSING_NETADDR = (
"Missing netaddr library. This is required for URL preview API."
@@ -250,6 +250,9 @@ class ContentRepositoryConfig(Config):
# - '192.168.0.0/16'
# - '100.64.0.0/10'
# - '169.254.0.0/16'
# - '::1/128'
# - 'fe80::/64'
# - 'fc00::/7'
#
# List of IP address CIDR ranges that the URL preview spider is allowed
# to access even if they are specified in url_preview_ip_range_blacklist.

synapse/config/server.py
@@ -14,13 +14,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from synapse.http.endpoint import parse_and_validate_server_name

from ._base import Config, ConfigError

logger = logging.Logger(__name__)


class ServerConfig(Config):

def read_config(self, config):
self.server_name = config["server_name"]

try:
parse_and_validate_server_name(self.server_name)
except ValueError as e:
raise ConfigError(str(e))

self.pid_file = self.abspath(config.get("pid_file"))
self.web_client = config["web_client"]
self.web_client_location = config.get("web_client_location", None)
@@ -37,6 +49,9 @@ class ServerConfig(Config):
# "disable" federation
self.send_federation = config.get("send_federation", True)

# Whether to enable user presence.
self.use_presence = config.get("use_presence", True)

# Whether to update the user directory or not. This should be set to
# false only if we are updating the user directory in a worker
self.update_user_directory = config.get("update_user_directory", True)
@@ -55,6 +70,26 @@ class ServerConfig(Config):
"block_non_admin_invites", False,
)

# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
self.max_mau_value = 0
if self.limit_usage_by_mau:
self.max_mau_value = config.get(
"max_mau_value", 0,
)
self.mau_limits_reserved_threepids = config.get(
"mau_limit_reserved_threepids", []
)

# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")

# Admin uri to direct users at should their instance become blocked
# due to resource constraints
self.admin_uri = config.get("admin_uri", None)

# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist = None
federation_domain_whitelist = config.get(
@@ -138,6 +173,12 @@ class ServerConfig(Config):

metrics_port = config.get("metrics_port")
if metrics_port:
logger.warn(
("The metrics_port configuration option is deprecated in Synapse 0.31 "
"in favour of a listener. Please see "
"http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst"
" on how to configure the new listener."))

self.listeners.append({
"port": metrics_port,
"bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
@@ -152,8 +193,8 @@ class ServerConfig(Config):
})

def default_config(self, server_name, **kwargs):
if ":" in server_name:
bind_port = int(server_name.split(":")[1])
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
unsecure_port = bind_port - 400
else:
bind_port = 8448
@@ -191,6 +232,8 @@ class ServerConfig(Config):
# different cores. See
# https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
#
# This setting requires the affinity package to be installed!
#
# cpu_affinity: 0xFFFFFFFF

# Whether to serve a web client from the HTTP/HTTPS root resource.
@@ -210,6 +253,9 @@ class ServerConfig(Config):
# hard limit.
soft_file_limit: 0

# Set to false to disable presence tracking on this homeserver.
use_presence: true

# The GC threshold parameters to pass to `gc.set_threshold`, if defined
# gc_thresholds: [700, 10, 10]

@@ -301,6 +347,32 @@ class ServerConfig(Config):
# - port: 9000
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole


# Homeserver blocking
#
# How to reach the server admin, used in ResourceLimitError
# admin_uri: 'mailto:admin@server.com'
#
# Global block config
#
# hs_disabled: False
# hs_disabled_message: 'Human readable reason for why the HS is blocked'
# hs_disabled_limit_type: 'error code(str), to help clients decode reason'
#
# Monthly Active User Blocking
#
# Enables monthly active user checking
# limit_usage_by_mau: False
# max_mau_value: 50
#
# Sometimes the server admin will want to ensure certain accounts are
# never blocked by mau checking. These accounts are specified here.
#
# mau_limit_reserved_threepids:
# - medium: 'email'
# address: 'reserved_user@example.com'

""" % locals()

def read_arguments(self, args):

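The MAU block added to ServerConfig above reads three related options; note that max_mau_value stays 0 unless limiting is switched on, which is what makes the feature safe to leave unconfigured. The same logic as a plain function (a hypothetical helper, for illustration only):

    def read_mau_options(config):
        # Mirrors the hunk above: the cap is only consulted when limiting
        # is enabled, and reserved threepids default to an empty list.
        limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        max_mau_value = 0
        if limit_usage_by_mau:
            max_mau_value = config.get("max_mau_value", 0)
        reserved = config.get("mau_limit_reserved_threepids", [])
        return limit_usage_by_mau, max_mau_value, reserved

    assert read_mau_options({}) == (False, 0, [])
    assert read_mau_options({"limit_usage_by_mau": True, "max_mau_value": 50})[1] == 50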
synapse/config/server_notices_config.py (new file, 87 lines)
@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.types import UserID

from ._base import Config

DEFAULT_CONFIG = """\
# Server Notices room configuration
#
# Uncomment this section to enable a room which can be used to send notices
# from the server to users. It is a special room which cannot be left; notices
# come from a special "notices" user id.
#
# If you uncomment this section, you *must* define the system_mxid_localpart
# setting, which defines the id of the user which will be used to send the
# notices.
#
# It's also possible to override the room name, the display name of the
# "notices" user, and the avatar for the user.
#
# server_notices:
#   system_mxid_localpart: notices
#   system_mxid_display_name: "Server Notices"
#   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
#   room_name: "Server Notices"
"""


class ServerNoticesConfig(Config):
    """Configuration for the server notices room.

    Attributes:
        server_notices_mxid (str|None):
            The MXID to use for server notices.
            None if server notices are not enabled.

        server_notices_mxid_display_name (str|None):
            The display name to use for the server notices user.
            None if server notices are not enabled.

        server_notices_mxid_avatar_url (str|None):
            The avatar URL to use for the server notices user.
            None if server notices are not enabled.

        server_notices_room_name (str|None):
            The name to use for the server notices room.
            None if server notices are not enabled.
    """
    def __init__(self):
        super(ServerNoticesConfig, self).__init__()
        self.server_notices_mxid = None
        self.server_notices_mxid_display_name = None
        self.server_notices_mxid_avatar_url = None
        self.server_notices_room_name = None

    def read_config(self, config):
        c = config.get("server_notices")
        if c is None:
            return

        mxid_localpart = c['system_mxid_localpart']
        self.server_notices_mxid = UserID(
            mxid_localpart, self.server_name,
        ).to_string()
        self.server_notices_mxid_display_name = c.get(
            'system_mxid_display_name', None,
        )
        self.server_notices_mxid_avatar_url = c.get(
            'system_mxid_avatar_url', None,
        )
        # todo: i18n
        self.server_notices_room_name = c.get('room_name', "Server Notices")

    def default_config(self, **kwargs):
        return DEFAULT_CONFIG

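server_notices_config.py builds the notices user's MXID from the mandatory system_mxid_localpart plus the homeserver's own server_name; that is why read_config indexes the key instead of using .get(). A sketch of the same behaviour without the synapse UserID type:

    def notices_mxid(config, server_name):
        # c["system_mxid_localpart"] (not .get) makes a missing localpart a
        # hard error once the server_notices section is present.
        c = config.get("server_notices")
        if c is None:
            return None
        return "@%s:%s" % (c["system_mxid_localpart"], server_name)

    assert notices_mxid({}, "example.com") is None
    assert notices_mxid(
        {"server_notices": {"system_mxid_localpart": "notices"}}, "example.com",
    ) == "@notices:example.com"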
synapse/config/tls.py
@@ -13,14 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
import os
import subprocess
from hashlib import sha256

from unpaddedbase64 import encode_base64

from OpenSSL import crypto
import subprocess
import os

from hashlib import sha256
from unpaddedbase64 import encode_base64
from ._base import Config

GENERATE_DH_PARAMS = False

@@ -133,7 +134,7 @@ class TlsConfig(Config):
tls_dh_params_path = config["tls_dh_params_path"]

if not self.path_exists(tls_private_key_path):
with open(tls_private_key_path, "w") as private_key_file:
with open(tls_private_key_path, "wb") as private_key_file:
tls_private_key = crypto.PKey()
tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
private_key_pem = crypto.dump_privatekey(
@@ -148,7 +149,7 @@ class TlsConfig(Config):
)

if not self.path_exists(tls_certificate_path):
with open(tls_certificate_path, "w") as certificate_file:
with open(tls_certificate_path, "wb") as certificate_file:
cert = crypto.X509()
subject = cert.get_subject()
subject.CN = config["server_name"]

synapse/config/voip.py
@@ -30,10 +30,10 @@ class VoipConfig(Config):
## Turn ##

# The public URIs of the TURN server to give to clients
turn_uris: []
#turn_uris: []

# The shared secret used to compute passwords for the TURN server
turn_shared_secret: "YOUR_SHARED_SECRET"
#turn_shared_secret: "YOUR_SHARED_SECRET"

# The Username and password if the TURN server needs them and
# does not use a token

synapse/crypto/context_factory.py
@@ -11,19 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import ssl
from OpenSSL import SSL
from twisted.internet._sslverify import _OpenSSLECCurve, _defaultCurveName

import logging

from zope.interface import implementer

from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory
from twisted.python.failure import Failure

logger = logging.getLogger(__name__)


class ServerContextFactory(ssl.ContextFactory):
class ServerContextFactory(ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections and to make connections to remote servers."""
connections."""

def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
@@ -32,8 +35,9 @@ class ServerContextFactory(ssl.ContextFactory):
@staticmethod
def configure_context(context, config):
try:
_ecCurve = _OpenSSLECCurve(_defaultCurveName)
_ecCurve.addECKeyToContext(context)
_ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
context.set_tmp_ecdh(_ecCurve)

except Exception:
logger.exception("Failed to enable elliptic curve for TLS")
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
@@ -47,3 +51,78 @@ class ServerContextFactory(ssl.ContextFactory):

def getContext(self):
return self._context


def _idnaBytes(text):
"""
Convert some text typed by a human into some ASCII bytes. This is a
copy of twisted.internet._idna._idnaBytes. For documentation, see the
twisted documentation.
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text)


def _tolerateErrors(wrapped):
"""
Wrap up an info_callback for pyOpenSSL so that if something goes wrong
the error is immediately logged and the connection is dropped if possible.
This is a copy of twisted.internet._sslverify._tolerateErrors. For
documentation, see the twisted documentation.
"""

def infoCallback(connection, where, ret):
try:
return wrapped(connection, where, ret)
except:  # noqa: E722, taken from the twisted implementation
f = Failure()
logger.exception("Error during info_callback")
connection.get_app_data().failVerification(f)

return infoCallback


@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions(object):
"""
Client creator for TLS without certificate identity verification. This is a
copy of twisted.internet._sslverify.ClientTLSOptions with the identity
verification left out. For documentation, see the twisted documentation.
"""

def __init__(self, hostname, ctx):
self._ctx = ctx
self._hostname = hostname
self._hostnameBytes = _idnaBytes(hostname)
ctx.set_info_callback(
_tolerateErrors(self._identityVerifyingInfoCallback)
)

def clientConnectionForTLS(self, tlsProtocol):
context = self._ctx
connection = SSL.Connection(context, None)
connection.set_app_data(tlsProtocol)
return connection

def _identityVerifyingInfoCallback(self, connection, where, ret):
if where & SSL.SSL_CB_HANDSHAKE_START:
connection.set_tlsext_host_name(self._hostnameBytes)


class ClientTLSOptionsFactory(object):
"""Factory for Twisted ClientTLSOptions that are used to make connections
to remote servers for federation."""

def __init__(self, config):
# We don't use config options yet
pass

def get_options(self, host):
return ClientTLSOptions(
host.decode('utf-8'),
CertificateOptions(verify=False).getContext()
)

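ClientTLSOptions above shows two tricks worth noting: the SNI hostname is set from an info callback at handshake start, and _tolerateErrors guarantees that a broken callback fails the connection instead of propagating into OpenSSL's C layer. The shape of that wrapper, reduced to plain Python (the constant value and class here are stand-ins for demonstration, not Twisted's API):

    def _tolerate_errors(wrapped, fail):
        # Never let an exception escape into the TLS stack; report it and
        # let the caller drop the connection, as the copied Twisted code does.
        def info_callback(connection, where, ret):
            try:
                return wrapped(connection, where, ret)
            except Exception as e:
                fail(connection, e)
        return info_callback

    SSL_CB_HANDSHAKE_START = 0x10  # stand-in for SSL.SSL_CB_HANDSHAKE_START

    def set_sni(connection, where, ret):
        if where & SSL_CB_HANDSHAKE_START:
            connection.set_tlsext_host_name(b"matrix.example.com")

    class FakeConnection(object):
        def set_tlsext_host_name(self, name):
            raise RuntimeError("boom")

    cb = _tolerate_errors(set_sni, lambda conn, e: print("dropping connection:", e))
    cb(FakeConnection(), SSL_CB_HANDSHAKE_START, 0)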
synapse/crypto/event_signing.py
@@ -15,16 +15,16 @@
# limitations under the License.


from synapse.api.errors import SynapseError, Codes
from synapse.events.utils import prune_event

from canonicaljson import encode_canonical_json
from unpaddedbase64 import encode_base64, decode_base64
from signedjson.sign import sign_json

import hashlib
import logging

from canonicaljson import encode_canonical_json
from signedjson.sign import sign_json
from unpaddedbase64 import decode_base64, encode_base64

from synapse.api.errors import Codes, SynapseError
from synapse.events.utils import prune_event

logger = logging.getLogger(__name__)


synapse/crypto/keyclient.py
@@ -13,14 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util import logcontext
from twisted.web.http import HTTPClient
from twisted.internet.protocol import Factory
from twisted.internet import defer, reactor
from synapse.http.endpoint import matrix_federation_endpoint
import simplejson as json
import logging

from canonicaljson import json

from twisted.internet import defer, reactor
from twisted.internet.protocol import Factory
from twisted.web.http import HTTPClient

from synapse.http.endpoint import matrix_federation_endpoint
from synapse.util import logcontext

logger = logging.getLogger(__name__)

@@ -28,14 +30,14 @@ KEY_API_V1 = b"/_matrix/key/v1/"


@defer.inlineCallbacks
def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
"""Fetch the keys for a remote server."""

factory = SynapseKeyClientFactory()
factory.path = path
factory.host = server_name
endpoint = matrix_federation_endpoint(
reactor, server_name, ssl_context_factory, timeout=30
reactor, server_name, tls_client_options_factory, timeout=30
)

for i in range(5):

synapse/crypto/keyring.py
@@ -14,32 +14,37 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.crypto.keyclient import fetch_server_key
from synapse.api.errors import SynapseError, Codes
from synapse.util import unwrapFirstError, logcontext
from synapse.util.logcontext import (
PreserveLoggingContext,
preserve_fn
)
from synapse.util.metrics import Measure
import hashlib
import logging
import urllib
from collections import namedtuple

from twisted.internet import defer

from signedjson.sign import (
verify_signed_json, signature_ids, sign_json, encode_canonical_json
)
from signedjson.key import (
is_signing_algorithm_supported, decode_verify_key_bytes
decode_verify_key_bytes,
encode_verify_key_base64,
is_signing_algorithm_supported,
)
from signedjson.sign import (
SignatureVerifyException,
encode_canonical_json,
sign_json,
signature_ids,
verify_signed_json,
)
from unpaddedbase64 import decode_base64, encode_base64

from OpenSSL import crypto
from twisted.internet import defer

from collections import namedtuple
import urllib
import hashlib
import logging

from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyclient import fetch_server_key
from synapse.util import logcontext, unwrapFirstError
from synapse.util.logcontext import (
PreserveLoggingContext,
preserve_fn,
run_in_background,
)
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)

@@ -55,7 +60,7 @@ Attributes:
key_ids(set(str)): The set of key_ids that could be used to verify the
JSON object
json_object(dict): The JSON object to verify.
deferred(twisted.internet.defer.Deferred):
deferred(Deferred[str, str, nacl.signing.VerifyKey]):
A deferred (server_name, key_id, verify_key) tuple that resolves when
a verify key has been fetched. The deferreds' callbacks are run with no
logcontext.
@@ -127,7 +132,7 @@ class Keyring(object):

verify_requests.append(verify_request)

preserve_fn(self._start_key_lookups)(verify_requests)
run_in_background(self._start_key_lookups, verify_requests)

# Pass those keys to handle_key_deferred so that the json object
# signatures can be verified
@@ -146,53 +151,56 @@ class Keyring(object):
verify_requests (List[VerifyKeyRequest]):
"""

# create a deferred for each server we're going to look up the keys
# for; we'll resolve them once we have completed our lookups.
# These will be passed into wait_for_previous_lookups to block
# any other lookups until we have finished.
# The deferreds are called with no logcontext.
server_to_deferred = {
rq.server_name: defer.Deferred()
for rq in verify_requests
}
try:
# create a deferred for each server we're going to look up the keys
# for; we'll resolve them once we have completed our lookups.
# These will be passed into wait_for_previous_lookups to block
# any other lookups until we have finished.
# The deferreds are called with no logcontext.
server_to_deferred = {
rq.server_name: defer.Deferred()
for rq in verify_requests
}

# We want to wait for any previous lookups to complete before
# proceeding.
yield self.wait_for_previous_lookups(
[rq.server_name for rq in verify_requests],
server_to_deferred,
)

# Actually start fetching keys.
self._get_server_verify_keys(verify_requests)

# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
#
# map from server name to a set of request ids
server_to_request_ids = {}

for verify_request in verify_requests:
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids.setdefault(server_name, set()).add(request_id)

def remove_deferreds(res, verify_request):
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids[server_name].discard(request_id)
if not server_to_request_ids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res

for verify_request in verify_requests:
verify_request.deferred.addBoth(
remove_deferreds, verify_request,
# We want to wait for any previous lookups to complete before
# proceeding.
yield self.wait_for_previous_lookups(
[rq.server_name for rq in verify_requests],
server_to_deferred,
)

# Actually start fetching keys.
self._get_server_verify_keys(verify_requests)

# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
#
# map from server name to a set of request ids
server_to_request_ids = {}

for verify_request in verify_requests:
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids.setdefault(server_name, set()).add(request_id)

def remove_deferreds(res, verify_request):
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids[server_name].discard(request_id)
if not server_to_request_ids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res

for verify_request in verify_requests:
verify_request.deferred.addBoth(
remove_deferreds, verify_request,
)
except Exception:
logger.exception("Error starting key lookups")

@defer.inlineCallbacks
def wait_for_previous_lookups(self, server_names, server_to_deferred):
"""Waits for any previous key lookups for the given servers to finish.
@@ -313,7 +321,7 @@ class Keyring(object):
if not verify_request.deferred.called:
verify_request.deferred.errback(err)

preserve_fn(do_iterations)().addErrback(on_err)
run_in_background(do_iterations).addErrback(on_err)

@defer.inlineCallbacks
def get_keys_from_store(self, server_name_and_key_ids):
@@ -329,8 +337,9 @@
"""
res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
preserve_fn(self.store.get_server_verify_keys)(
server_name, key_ids
run_in_background(
self.store.get_server_verify_keys,
server_name, key_ids,
).addCallback(lambda ks, server: (server, ks), server_name)
for server_name, key_ids in server_name_and_key_ids
],
@@ -352,13 +361,13 @@ class Keyring(object):
logger.exception(
"Unable to get key from %r: %s %s",
perspective_name,
type(e).__name__, str(e.message),
type(e).__name__, str(e),
)
defer.returnValue({})

results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
preserve_fn(get_key)(p_name, p_keys)
run_in_background(get_key, p_name, p_keys)
for p_name, p_keys in self.perspective_servers.items()
],
consumeErrors=True,
@@ -384,7 +393,7 @@ class Keyring(object):
logger.info(
"Unable to get key %r for %r directly: %s %s",
key_ids, server_name,
type(e).__name__, str(e.message),
type(e).__name__, str(e),
)

if not keys:
@@ -398,7 +407,7 @@ class Keyring(object):

results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
preserve_fn(get_key)(server_name, key_ids)
run_in_background(get_key, server_name, key_ids)
for server_name, key_ids in server_name_and_key_ids
],
consumeErrors=True,
@@ -481,7 +490,8 @@ class Keyring(object):

yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
preserve_fn(self.store_keys)(
run_in_background(
self.store_keys,
server_name=server_name,
from_server=perspective_name,
verify_keys=response_keys,
@@ -502,7 +512,7 @@ class Keyring(object):
continue

(response, tls_certificate) = yield fetch_server_key(
server_name, self.hs.tls_server_context_factory,
server_name, self.hs.tls_client_options_factory,
path=(b"/_matrix/key/v2/server/%s" % (
urllib.quote(requested_key_id),
)).encode("ascii"),
@@ -539,7 +549,8 @@ class Keyring(object):

yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
preserve_fn(self.store_keys)(
run_in_background(
self.store_keys,
server_name=key_server_name,
from_server=server_name,
verify_keys=verify_keys,
@@ -615,7 +626,8 @@ class Keyring(object):

yield logcontext.make_deferred_yieldable(defer.gatherResults(
[
preserve_fn(self.store.store_server_keys_json)(
run_in_background(
self.store.store_server_keys_json,
server_name=server_name,
key_id=key_id,
from_server=server_name,
@@ -643,7 +655,7 @@ class Keyring(object):
# Try to fetch the key from the remote server.

(response, tls_certificate) = yield fetch_server_key(
server_name, self.hs.tls_server_context_factory
server_name, self.hs.tls_client_options_factory
)

# Check the response.
@@ -716,7 +728,8 @@ class Keyring(object):
# TODO(markjh): Store whether the keys have expired.
return logcontext.make_deferred_yieldable(defer.gatherResults(
[
preserve_fn(self.store.store_server_verify_key)(
run_in_background(
self.store.store_server_verify_key,
server_name, server_name, key.time_added, key
)
for key_id, key in verify_keys.items()
@@ -727,6 +740,17 @@ class Keyring(object):

@defer.inlineCallbacks
def _handle_key_deferred(verify_request):
"""Waits for the key to become available, and then performs a verification

Args:
verify_request (VerifyKeyRequest):

Returns:
Deferred[None]

Raises:
SynapseError if there was a problem performing the verification
"""
server_name = verify_request.server_name
try:
with PreserveLoggingContext():
@@ -734,7 +758,7 @@ def _handle_key_deferred(verify_request):
except IOError as e:
logger.warn(
"Got IOError when downloading keys for %s: %s %s",
server_name, type(e).__name__, str(e.message),
server_name, type(e).__name__, str(e),
)
raise SynapseError(
502,
@@ -744,7 +768,7 @@ def _handle_key_deferred(verify_request):
except Exception as e:
logger.exception(
"Got Exception when downloading keys for %s: %s %s",
server_name, type(e).__name__, str(e.message),
server_name, type(e).__name__, str(e),
)
raise SynapseError(
401,
@@ -759,11 +783,17 @@ def _handle_key_deferred(verify_request):
))
try:
verify_signed_json(json_object, server_name, verify_key)
except Exception:
except SignatureVerifyException as e:
logger.debug(
"Error verifying signature for %s:%s:%s with key %s: %s",
server_name, verify_key.alg, verify_key.version,
encode_verify_key_base64(verify_key),
str(e),
)
raise SynapseError(
401,
"Invalid signature for server %s with key %s:%s" % (
server_name, verify_key.alg, verify_key.version
"Invalid signature for server %s with key %s:%s: %s" % (
server_name, verify_key.alg, verify_key.version, str(e),
),
Codes.UNAUTHORIZED,
)

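Most of the keyring diff above is the same mechanical substitution: preserve_fn(f)(*args) becomes run_in_background(f, *args). The two are equivalent in effect; run_in_background fuses the wrap-and-call into one step and is harder to misuse. Stripped of Twisted and of synapse's log-context bookkeeping, the relationship is simply (hypothetical stand-ins for illustration):

    def run_in_background(f, *args, **kwargs):
        # Stand-in: synapse's real helper also starts f under the caller's
        # log context and detaches it afterwards.
        return f(*args, **kwargs)

    def preserve_fn(f):
        # The old spelling: wrap once, then call the wrapper.
        def wrapped(*args, **kwargs):
            return run_in_background(f, *args, **kwargs)
        return wrapped

    # The diff rewrites the left-hand form into the right-hand form throughout:
    assert preserve_fn(len)("abc") == run_in_background(len, "abc") == 3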
synapse/event_auth.py
@@ -17,11 +17,11 @@ import logging

from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64

from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, SynapseError, EventSizeError
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.types import UserID, get_domain_from_id

logger = logging.getLogger(__name__)
@@ -34,9 +34,11 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
event: the event being checked.
auth_events (dict: event-key -> event): the existing room state.

Raises:
AuthError if the checks fail

Returns:
True if the auth checks pass.
if the auth checks pass.
"""
if do_size_check:
_check_size_limits(event)
@@ -71,17 +73,27 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
# Oh, we don't know what the state of the room was, so we
# are trusting that this is allowed (at least for now)
logger.warn("Trusting event: %s", event.event_id)
return True
return

if event.type == EventTypes.Create:
sender_domain = get_domain_from_id(event.sender)
room_id_domain = get_domain_from_id(event.room_id)
if room_id_domain != sender_domain:
raise AuthError(
403,
"Creation event's room_id domain does not match sender's"
)

room_version = event.content.get("room_version", "1")
if room_version not in KNOWN_ROOM_VERSIONS:
raise AuthError(
403,
"room appears to have unsupported version %s" % (
room_version,
))
# FIXME
return True
logger.debug("Allowing! %s", event)
return

creation_event = auth_events.get((EventTypes.Create, ""), None)

@@ -118,7 +130,8 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
403,
"Alias event's state_key does not match sender's domain"
)
return True
logger.debug("Allowing! %s", event)
return

if logger.isEnabledFor(logging.DEBUG):
logger.debug(
@@ -127,14 +140,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
)

if event.type == EventTypes.Member:
allowed = _is_membership_change_allowed(
event, auth_events
)
if allowed:
logger.debug("Allowing! %s", event)
else:
logger.debug("Denying! %s", event)
return allowed
_is_membership_change_allowed(event, auth_events)
logger.debug("Allowing! %s", event)
return

_check_event_sender_in_room(event, auth_events)

@@ -153,7 +161,8 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
)
)
else:
return True
logger.debug("Allowing! %s", event)
return

_can_send_event(event, auth_events)

@@ -200,7 +209,7 @@ def _is_membership_change_allowed(event, auth_events):
create = auth_events.get(key)
if create and event.prev_events[0][0] == create.event_id:
if create.content["creator"] == event.state_key:
return True
return

target_user_id = event.state_key

@@ -265,13 +274,13 @@ def _is_membership_change_allowed(event, auth_events):
raise AuthError(
403, "%s is banned from the room" % (target_user_id,)
)
return True
return

if Membership.JOIN != membership:
if (caller_invited
and Membership.LEAVE == membership
and target_user_id == event.user_id):
return True
return

if not caller_in_room:  # caller isn't joined
raise AuthError(
@@ -334,8 +343,6 @@ def _is_membership_change_allowed(event, auth_events):
else:
raise AuthError(500, "Unknown membership %s" % membership)

return True


def _check_event_sender_in_room(event, auth_events):
key = (EventTypes.Member, event.user_id, )
@@ -355,35 +362,46 @@ def _check_joined_room(member, user_id, room_id):
))


def get_send_level(etype, state_key, auth_events):
key = (EventTypes.PowerLevels, "", )
send_level_event = auth_events.get(key)
send_level = None
if send_level_event:
send_level = send_level_event.content.get("events", {}).get(
etype
)
if send_level is None:
if state_key is not None:
send_level = send_level_event.content.get(
"state_default", 50
)
else:
send_level = send_level_event.content.get(
"events_default", 0
)
def get_send_level(etype, state_key, power_levels_event):
"""Get the power level required to send an event of a given type

if send_level:
send_level = int(send_level)
The federation spec [1] refers to this as "Required Power Level".

https://matrix.org/docs/spec/server_server/unstable.html#definitions

Args:
etype (str): type of event
state_key (str|None): state_key of state event, or None if it is not
a state event.
power_levels_event (synapse.events.EventBase|None): power levels event
in force at this point in the room
Returns:
int: power level required to send this event.
"""

if power_levels_event:
power_levels_content = power_levels_event.content
else:
send_level = 0
power_levels_content = {}

return send_level
# see if we have a custom level for this event type
send_level = power_levels_content.get("events", {}).get(etype)

# otherwise, fall back to the state_default/events_default.
if send_level is None:
if state_key is not None:
send_level = power_levels_content.get("state_default", 50)
else:
send_level = power_levels_content.get("events_default", 0)

return int(send_level)


def _can_send_event(event, auth_events):
power_levels_event = _get_power_level_event(auth_events)

send_level = get_send_level(
event.type, event.get("state_key", None), auth_events
event.type, event.get("state_key"), power_levels_event,
)
user_level = get_user_power_level(event.user_id, auth_events)

@@ -471,14 +489,14 @@ def _check_power_levels(event, auth_events):
]

old_list = current_state.content.get("users", {})
for user in set(old_list.keys() + user_list.keys()):
for user in set(list(old_list) + list(user_list)):
levels_to_check.append(
(user, "users")
)

old_list = current_state.content.get("events", {})
new_list = event.content.get("events", {})
for ev_id in set(old_list.keys() + new_list.keys()):
for ev_id in set(list(old_list) + list(new_list)):
levels_to_check.append(
(ev_id, "events")
)
@@ -515,7 +533,11 @@ def _check_power_levels(event, auth_events):
"to your own"
)

if old_level > user_level or new_level > user_level:
# Check if the old and new levels are greater than the user level
# (if defined)
old_level_too_big = old_level is not None and old_level > user_level
new_level_too_big = new_level is not None and new_level > user_level
if old_level_too_big or new_level_too_big:
raise AuthError(
403,
"You don't have permission to add ops level greater "
@@ -524,13 +546,22 @@ def _check_power_levels(event, auth_events):


def _get_power_level_event(auth_events):
key = (EventTypes.PowerLevels, "", )
return auth_events.get(key)
return auth_events.get((EventTypes.PowerLevels, ""))


def get_user_power_level(user_id, auth_events):
power_level_event = _get_power_level_event(auth_events)
"""Get a user's power level

Args:
user_id (str): user's id to look up in power_levels
auth_events (dict[(str, str), synapse.events.EventBase]):
state in force at this point in the room (or rather, a subset of
it including at least the create event and power levels event).

Returns:
int: the user's power level in this room.
"""
power_level_event = _get_power_level_event(auth_events)
if power_level_event:
level = power_level_event.content.get("users", {}).get(user_id)
if not level:
@@ -541,6 +572,11 @@ def get_user_power_level(user_id, auth_events):
else:
return int(level)
else:
# if there is no power levels event, the creator gets 100 and everyone
# else gets 0.

# some things which call this don't pass the create event: hack around
# that.
key = (EventTypes.Create, "", )
create_event = auth_events.get(key)
if (create_event is not None and

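The rewritten get_send_level above is behaviourally identical to the old version apart from always returning an int; the lookup order is a per-event-type override in "events", then "state_default" (50) for state events or "events_default" (0) for message events. A self-contained version over a bare content dict, with worked examples:

    def get_send_level(etype, state_key, power_levels_content):
        # Same logic as the diff, with the event object replaced by its
        # content dict so the example can run standalone.
        send_level = power_levels_content.get("events", {}).get(etype)
        if send_level is None:
            if state_key is not None:
                send_level = power_levels_content.get("state_default", 50)
            else:
                send_level = power_levels_content.get("events_default", 0)
        return int(send_level)

    pl = {"events": {"m.room.name": 75}, "state_default": 50, "events_default": 0}
    assert get_send_level("m.room.name", "", pl) == 75      # per-type override
    assert get_send_level("m.room.topic", "", pl) == 50     # state event fallback
    assert get_send_level("m.room.message", None, pl) == 0  # message fallback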
synapse/events/__init__.py
@@ -13,9 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util.frozenutils import freeze
from synapse.util.caches import intern_dict

from synapse.util.frozenutils import freeze

# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
# bugs where we accidentally share e.g. signature dicts. However, converting
@@ -47,14 +46,26 @@ class _EventInternalMetadata(object):


def _event_dict_property(key):
# We want to be able to use hasattr with the event dict properties.
# However, (on python3) hasattr expects AttributeError to be raised. Hence,
# we need to transform the KeyError into an AttributeError
def getter(self):
return self._event_dict[key]
try:
return self._event_dict[key]
except KeyError:
raise AttributeError(key)

def setter(self, v):
self._event_dict[key] = v
try:
self._event_dict[key] = v
except KeyError:
raise AttributeError(key)

def delete(self):
del self._event_dict[key]
try:
del self._event_dict[key]
except KeyError:
raise AttributeError(key)

return property(
getter,
@@ -134,7 +145,7 @@ class EventBase(object):
return field in self._event_dict

def items(self):
return self._event_dict.items()
return list(self._event_dict.items())


class FrozenEvent(EventBase):

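The _event_dict_property change above is a Python 3 compatibility fix: hasattr() decides attribute absence by catching AttributeError, so a property backed by a dict must translate KeyError. A compact demonstration of the same pattern:

    def _dict_prop(key):
        # KeyError must become AttributeError so hasattr() works on Python 3,
        # exactly as the diff above does for getter/setter/delete.
        def getter(self):
            try:
                return self._event_dict[key]
            except KeyError:
                raise AttributeError(key)
        def setter(self, v):
            self._event_dict[key] = v
        return property(getter, setter)

    class Event(object):
        sender = _dict_prop("sender")
        def __init__(self):
            self._event_dict = {}

    e = Event()
    assert not hasattr(e, "sender")   # KeyError surfaced as AttributeError
    e.sender = "@alice:example.com"
    assert e.sender == "@alice:example.com"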
synapse/events/builder.py
@@ -13,13 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from . import EventBase, FrozenEvent, _event_dict_property
import copy

from synapse.types import EventID

from synapse.util.stringutils import random_string

import copy
from . import EventBase, FrozenEvent, _event_dict_property


class EventBuilder(EventBase):

@@ -13,22 +13,18 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from twisted.internet import defer
|
||||
from six import iteritems
|
||||
|
||||
from frozendict import frozendict
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
||||
|
||||
|
||||
class EventContext(object):
|
||||
"""
|
||||
Attributes:
|
||||
current_state_ids (dict[(str, str), str]):
|
||||
The current state map including the current event.
|
||||
(type, state_key) -> event_id
|
||||
|
||||
prev_state_ids (dict[(str, str), str]):
|
||||
The current state map excluding the current event.
|
||||
(type, state_key) -> event_id
|
||||
|
||||
state_group (int|None): state group id, if the state has been stored
|
||||
as a state group. This is usually only None if e.g. the event is
|
||||
an outlier.
|
||||
@@ -45,38 +41,77 @@ class EventContext(object):
|
||||
|
||||
prev_state_events (?): XXX: is this ever set to anything other than
|
||||
the empty list?
|
||||
|
||||
_current_state_ids (dict[(str, str), str]|None):
|
||||
The current state map including the current event. None if outlier
|
||||
or we haven't fetched the state from DB yet.
|
||||
(type, state_key) -> event_id
|
||||
|
||||
_prev_state_ids (dict[(str, str), str]|None):
|
||||
The current state map excluding the current event. None if outlier
|
||||
or we haven't fetched the state from DB yet.
|
||||
(type, state_key) -> event_id
|
||||
|
||||
_fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
|
||||
been calculated. None if we haven't started calculating yet
|
||||
|
||||
_event_type (str): The type of the event the context is associated with.
|
||||
Only set when state has not been fetched yet.
|
||||
|
||||
_event_state_key (str|None): The state_key of the event the context is
|
||||
associated with. Only set when state has not been fetched yet.
|
||||
|
||||
_prev_state_id (str|None): If the event associated with the context is
|
||||
a state event, then `_prev_state_id` is the event_id of the state
|
||||
that was replaced.
|
||||
Only set when state has not been fetched yet.
|
||||
"""
|
||||
|
||||
__slots__ = [
|
||||
"current_state_ids",
|
||||
"prev_state_ids",
|
||||
"state_group",
|
||||
"rejected",
|
||||
"prev_group",
|
||||
"delta_ids",
|
||||
"prev_state_events",
|
||||
"app_service",
|
||||
"_current_state_ids",
|
||||
"_prev_state_ids",
|
||||
"_prev_state_id",
|
||||
"_event_type",
|
||||
"_event_state_key",
|
||||
"_fetching_state_deferred",
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
# The current state including the current event
|
||||
self.current_state_ids = None
|
||||
# The current state excluding the current event
|
||||
self.prev_state_ids = None
|
||||
self.state_group = None
|
||||
|
||||
self.prev_state_events = []
|
||||
self.rejected = False
|
||||
self.app_service = None
|
||||
|
||||
@staticmethod
|
||||
def with_state(state_group, current_state_ids, prev_state_ids,
|
||||
prev_group=None, delta_ids=None):
|
||||
context = EventContext()
|
||||
|
||||
# The current state including the current event
|
||||
context._current_state_ids = current_state_ids
|
||||
# The current state excluding the current event
|
||||
context._prev_state_ids = prev_state_ids
|
||||
context.state_group = state_group
|
||||
|
||||
context._prev_state_id = None
|
||||
context._event_type = None
|
||||
context._event_state_key = None
|
||||
context._fetching_state_deferred = defer.succeed(None)
|
||||
|
||||
# A previously persisted state group and a delta between that
|
||||
# and this state.
|
||||
self.prev_group = None
|
||||
self.delta_ids = None
|
||||
context.prev_group = prev_group
|
||||
context.delta_ids = delta_ids
|
||||
|
||||
self.prev_state_events = None
|
||||
return context
|
||||
|
||||
self.app_service = None
|
||||
|
||||
def serialize(self, event):
|
||||
@defer.inlineCallbacks
|
||||
def serialize(self, event, store):
|
||||
"""Converts self to a type that can be serialized as JSON, and then
|
||||
deserialized by `deserialize`
|
||||
|
||||
@@ -92,11 +127,12 @@ class EventContext(object):
|
||||
# the prev_state_ids, so if we're a state event we include the event
|
||||
# id that we replaced in the state.
|
||||
if event.is_state():
|
||||
prev_state_id = self.prev_state_ids.get((event.type, event.state_key))
|
||||
prev_state_ids = yield self.get_prev_state_ids(store)
|
||||
prev_state_id = prev_state_ids.get((event.type, event.state_key))
|
||||
else:
|
||||
prev_state_id = None
|
||||
|
||||
return {
|
||||
defer.returnValue({
|
||||
"prev_state_id": prev_state_id,
|
||||
"event_type": event.type,
|
||||
"event_state_key": event.state_key if event.is_state() else None,
|
||||
@@ -106,10 +142,9 @@ class EventContext(object):
|
||||
"delta_ids": _encode_state_dict(self.delta_ids),
|
||||
"prev_state_events": self.prev_state_events,
|
||||
"app_service_id": self.app_service.id if self.app_service else None
|
||||
}
|
||||
})
|
||||
|
||||
@staticmethod
|
||||
@defer.inlineCallbacks
|
||||
def deserialize(store, input):
|
||||
"""Converts a dict that was produced by `serialize` back into a
|
||||
EventContext.
|
||||
@@ -122,32 +157,115 @@ class EventContext(object):
|
||||
EventContext
|
||||
"""
|
||||
context = EventContext()
|
||||
context.state_group = input["state_group"]
|
||||
context.rejected = input["rejected"]
|
||||
context.prev_group = input["prev_group"]
|
||||
context.delta_ids = _decode_state_dict(input["delta_ids"])
|
||||
context.prev_state_events = input["prev_state_events"]
|
||||
|
||||
# We use the state_group and prev_state_id stuff to pull the
|
||||
        # current_state_ids out of the DB and construct prev_state_ids.
        prev_state_id = input["prev_state_id"]
        event_type = input["event_type"]
        event_state_key = input["event_state_key"]
        context._prev_state_id = input["prev_state_id"]
        context._event_type = input["event_type"]
        context._event_state_key = input["event_state_key"]

        context.current_state_ids = yield store.get_state_ids_for_group(
            context.state_group,
        )
        if prev_state_id and event_state_key:
            context.prev_state_ids = dict(context.current_state_ids)
            context.prev_state_ids[(event_type, event_state_key)] = prev_state_id
        else:
            context.prev_state_ids = context.current_state_ids
        context._current_state_ids = None
        context._prev_state_ids = None
        context._fetching_state_deferred = None

        context.state_group = input["state_group"]
        context.prev_group = input["prev_group"]
        context.delta_ids = _decode_state_dict(input["delta_ids"])

        context.rejected = input["rejected"]
        context.prev_state_events = input["prev_state_events"]

        app_service_id = input["app_service_id"]
        if app_service_id:
            context.app_service = store.get_app_service_by_id(app_service_id)

        defer.returnValue(context)
        return context

    @defer.inlineCallbacks
    def get_current_state_ids(self, store):
        """Gets the current state IDs

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
            is None, which happens when the associated event is an outlier.
        """

        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(
                self._fill_out_state, store,
            )

        yield make_deferred_yieldable(self._fetching_state_deferred)

        defer.returnValue(self._current_state_ids)

    @defer.inlineCallbacks
    def get_prev_state_ids(self, store):
        """Gets the prev state IDs

        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group
            is None, which happens when the associated event is an outlier.
        """

        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(
                self._fill_out_state, store,
            )

        yield make_deferred_yieldable(self._fetching_state_deferred)

        defer.returnValue(self._prev_state_ids)

    def get_cached_current_state_ids(self):
        """Gets the current state IDs if we have them already cached.

        Returns:
            dict[(str, str), str]|None: Returns None if we haven't cached the
            state or if state_group is None, which happens when the associated
            event is an outlier.
        """

        return self._current_state_ids

    @defer.inlineCallbacks
    def _fill_out_state(self, store):
        """Called to populate the _current_state_ids and _prev_state_ids
        attributes by loading from the database.
        """
        if self.state_group is None:
            return

        self._current_state_ids = yield store.get_state_ids_for_group(
            self.state_group,
        )
        if self._prev_state_id and self._event_state_key is not None:
            self._prev_state_ids = dict(self._current_state_ids)

            key = (self._event_type, self._event_state_key)
            self._prev_state_ids[key] = self._prev_state_id
        else:
            self._prev_state_ids = self._current_state_ids

    @defer.inlineCallbacks
    def update_state(self, state_group, prev_state_ids, current_state_ids,
                     prev_group, delta_ids):
        """Replace the state in the context
        """

        # We need to make sure we wait for any ongoing fetching of state
        # to complete so that the updated state doesn't get clobbered
        if self._fetching_state_deferred:
            yield make_deferred_yieldable(self._fetching_state_deferred)

        self.state_group = state_group
        self._prev_state_ids = prev_state_ids
        self.prev_group = prev_group
        self._current_state_ids = current_state_ids
        self.delta_ids = delta_ids

        # We need to ensure that we've marked as having fetched the state
        self._fetching_state_deferred = defer.succeed(None)


def _encode_state_dict(state_dict):
@@ -159,7 +277,7 @@ def _encode_state_dict(state_dict):

    return [
        (etype, state_key, v)
        for (etype, state_key), v in state_dict.iteritems()
        for (etype, state_key), v in iteritems(state_dict)
    ]

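The accessor pair above relies on memoizing a single in-flight Deferred: the first caller starts _fill_out_state, every later caller waits on the same Deferred, and results are read off the object rather than out of the Deferred. A minimal standalone sketch of that pattern (class and method names here are illustrative, not synapse's):

from twisted.internet import defer

class LazyStateHolder(object):
    """First caller triggers the load; concurrent callers share the wait."""

    def __init__(self, load_func):
        self._load_func = load_func
        self._state = None
        self._fetching = None  # in-flight (or completed) Deferred

    @defer.inlineCallbacks
    def get_state(self):
        if self._fetching is None:
            self._fetching = defer.maybeDeferred(self._load_func)
            self._fetching.addCallback(self._remember)
        # Wait for the shared fetch, but read the result off the instance:
        # the Deferred's own result may be consumed by an earlier waiter.
        yield self._fetching
        defer.returnValue(self._state)

    def _remember(self, state):
        self._state = state
        return state
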
@@ -13,12 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.api.constants import EventTypes
from . import EventBase
import re

from six import string_types

from frozendict import frozendict

import re
from synapse.api.constants import EventTypes

from . import EventBase

# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded
@@ -277,7 +280,7 @@ def serialize_event(e, time_now_ms, as_client_event=True,

    if only_event_fields:
        if (not isinstance(only_event_fields, list) or
                not all(isinstance(f, basestring) for f in only_event_fields)):
                not all(isinstance(f, string_types) for f in only_event_fields)):
            raise TypeError("only_event_fields must be a list of strings")
        d = only_fields(d, only_event_fields)


@@ -13,9 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.types import EventID, RoomID, UserID
from synapse.api.errors import SynapseError
from six import string_types

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.types import EventID, RoomID, UserID


class EventValidator(object):
@@ -49,7 +51,7 @@ class EventValidator(object):
            strings.append("state_key")

        for s in strings:
            if not isinstance(getattr(event, s), basestring):
            if not isinstance(getattr(event, s), string_types):
                raise SynapseError(400, "Not '%s' a string type" % (s,))

        if event.type == EventTypes.Member:
@@ -88,5 +90,5 @@ class EventValidator(object):
        for s in keys:
            if s not in d:
                raise SynapseError(400, "'%s' not in content" % (s,))
            if not isinstance(d[s], basestring):
            if not isinstance(d[s], string_types):
                raise SynapseError(400, "Not '%s' a string type" % (s,))

@@ -14,13 +14,17 @@
# limitations under the License.
import logging

from synapse.api.errors import SynapseError
import six

from twisted.internet import defer

from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_request
from synapse.util import unwrapFirstError, logcontext
from twisted.internet import defer
from synapse.http.servlet import assert_params_in_dict
from synapse.util import logcontext, unwrapFirstError

logger = logging.getLogger(__name__)

@@ -190,11 +194,23 @@ def event_from_pdu_json(pdu_json, outlier=False):
        FrozenEvent

    Raises:
        SynapseError: if the pdu is missing required fields
        SynapseError: if the pdu is missing required fields or is otherwise
            not a valid matrix event
    """
    # we could probably enforce a bunch of other fields here (room_id, sender,
    # origin, etc etc)
    assert_params_in_request(pdu_json, ('event_id', 'type'))
    assert_params_in_dict(pdu_json, ('event_id', 'type', 'depth'))

    depth = pdu_json['depth']
    if not isinstance(depth, six.integer_types):
        raise SynapseError(400, "Depth %r not an integer" % (depth, ),
                           Codes.BAD_JSON)

    if depth < 0:
        raise SynapseError(400, "Depth too small", Codes.BAD_JSON)
    elif depth > MAX_DEPTH:
        raise SynapseError(400, "Depth too large", Codes.BAD_JSON)

    event = FrozenEvent(
        pdu_json
    )

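The new depth check can be exercised standalone; the following hedged sketch mirrors the validation this hunk adds (MAX_DEPTH below is an illustrative constant, not necessarily synapse's value):

import six

MAX_DEPTH = 2 ** 63 - 1  # illustrative bound; synapse defines its own constant

def check_depth(pdu_json):
    # 'depth' must be present, an integer, and within [0, MAX_DEPTH]
    if 'depth' not in pdu_json:
        raise ValueError("Missing 'depth' field")
    depth = pdu_json['depth']
    if not isinstance(depth, six.integer_types):
        raise ValueError("Depth %r not an integer" % (depth,))
    if depth < 0:
        raise ValueError("Depth too small")
    if depth > MAX_DEPTH:
        raise ValueError("Depth too large")

check_depth({'event_id': '$ev:example.org', 'type': 'm.room.message', 'depth': 7})
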
@@ -19,36 +19,42 @@ import itertools
import logging
import random

from six.moves import range

from prometheus_client import Counter

from twisted.internet import defer

from synapse.api.constants import Membership
from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
from synapse.api.errors import (
    CodeMessageException, HttpResponseException, SynapseError, FederationDeniedError
    CodeMessageException,
    FederationDeniedError,
    HttpResponseException,
    SynapseError,
)
from synapse.events import builder
from synapse.federation.federation_base import (
    FederationBase,
    event_from_pdu_json,
)
import synapse.metrics
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.util import logcontext, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)


# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"])


PDU_RETRY_TIME_MS = 1 * 60 * 1000


class InvalidResponseError(RuntimeError):
    """Helper for _try_destination_list: indicates that the server returned a response
    we couldn't parse
    """
    pass


class FederationClient(FederationBase):
    def __init__(self, hs):
        super(FederationClient, self).__init__(hs)
@@ -106,7 +112,7 @@ class FederationClient(FederationBase):
            a Deferred which will eventually yield a JSON object from the
            response
        """
        sent_queries_counter.inc(query_type)
        sent_queries_counter.labels(query_type).inc()

        return self.transport_layer.make_query(
            destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail,
@@ -125,7 +131,7 @@ class FederationClient(FederationBase):
            a Deferred which will eventually yield a JSON object from the
            response
        """
        sent_queries_counter.inc("client_device_keys")
        sent_queries_counter.labels("client_device_keys").inc()
        return self.transport_layer.query_client_keys(
            destination, content, timeout
        )
@@ -135,7 +141,7 @@ class FederationClient(FederationBase):
        """Query the device keys for a list of user ids hosted on a remote
        server.
        """
        sent_queries_counter.inc("user_devices")
        sent_queries_counter.labels("user_devices").inc()
        return self.transport_layer.query_user_devices(
            destination, user_id, timeout
        )
@@ -152,7 +158,7 @@ class FederationClient(FederationBase):
            a Deferred which will eventually yield a JSON object from the
            response
        """
        sent_queries_counter.inc("client_one_time_keys")
        sent_queries_counter.labels("client_one_time_keys").inc()
        return self.transport_layer.claim_client_keys(
            destination, content, timeout
        )
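These hunks are part of a migration from synapse's old metrics wrapper to prometheus_client: a labelled Counter is declared once at module scope, and each .labels(value).inc() call bumps a separate per-label time series. A self-contained sketch (the metric name below is made up):

from prometheus_client import Counter, generate_latest

# One counter with one label axis; each distinct label value is its own series.
sent_queries = Counter("example_sent_queries", "Queries sent, by type", ["type"])

sent_queries.labels("profile").inc()
sent_queries.labels("profile").inc()
sent_queries.labels("directory").inc()

# generate_latest() renders the text exposition format that Prometheus scrapes.
print(generate_latest().decode("ascii"))
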
@@ -392,9 +398,9 @@ class FederationClient(FederationBase):
        """
        if return_local:
            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
            signed_events = seen_events.values()
            signed_events = list(seen_events.values())
        else:
            seen_events = yield self.store.have_events(event_ids)
            seen_events = yield self.store.have_seen_events(event_ids)
            signed_events = []

        failed_to_fetch = set()
@@ -413,11 +419,12 @@ class FederationClient(FederationBase):

        batch_size = 20
        missing_events = list(missing_events)
        for i in xrange(0, len(missing_events), batch_size):
        for i in range(0, len(missing_events), batch_size):
            batch = set(missing_events[i:i + batch_size])

            deferreds = [
                preserve_fn(self.get_pdu)(
                run_in_background(
                    self.get_pdu,
                    destinations=random_server_list(),
                    event_id=e_id,
                )
@@ -458,8 +465,63 @@ class FederationClient(FederationBase):
        defer.returnValue(signed_auth)

    @defer.inlineCallbacks
    def _try_destination_list(self, description, destinations, callback):
        """Try an operation on a series of servers, until it succeeds

        Args:
            description (unicode): description of the operation we're doing, for logging

            destinations (Iterable[unicode]): list of server_names to try

            callback (callable): Function to run for each server. Passed a single
                argument: the server_name to try. May return a deferred.

                If the callback raises a CodeMessageException with a 300/400 code,
                attempts to perform the operation stop immediately and the exception is
                reraised.

                Otherwise, if the callback raises an Exception the error is logged and the
                next server tried. Normally the stacktrace is logged but this is
                suppressed if the exception is an InvalidResponseError.

        Returns:
            The [Deferred] result of callback, if it succeeds

        Raises:
            SynapseError if the chosen remote server returns a 300/400 code.

            RuntimeError if no servers were reachable.
        """
        for destination in destinations:
            if destination == self.server_name:
                continue

            try:
                res = yield callback(destination)
                defer.returnValue(res)
            except InvalidResponseError as e:
                logger.warn(
                    "Failed to %s via %s: %s",
                    description, destination, e,
                )
            except HttpResponseException as e:
                if not 500 <= e.code < 600:
                    raise e.to_synapse_error()
                else:
                    logger.warn(
                        "Failed to %s via %s: %i %s",
                        description, destination, e.code, e.message,
                    )
            except Exception:
                logger.warn(
                    "Failed to %s via %s",
                    description, destination, exc_info=1,
                )

        raise RuntimeError("Failed to %s via any server" % (description, ))
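The methods below are rewritten around this helper: each keeps only its per-operation logic in a nested send_request callback and delegates the retry-over-destinations loop. A schematic caller, with simplified error handling and illustrative names:

from twisted.internet import defer

class SchematicClient(object):
    server_name = "us.example.org"

    @defer.inlineCallbacks
    def _try_destination_list(self, description, destinations, callback):
        for destination in destinations:
            if destination == self.server_name:
                continue  # never try to federate with ourselves
            try:
                res = yield callback(destination)
                defer.returnValue(res)
            except Exception:
                continue  # simplified: the real code logs and re-raises 4xx errors
        raise RuntimeError("Failed to %s via any server" % (description,))

    def fetch_thing(self, destinations, thing_id):
        @defer.inlineCallbacks
        def send_request(destination):
            # issue the actual network request to `destination` here
            defer.returnValue((destination, thing_id))

        return self._try_destination_list("fetch thing", destinations, send_request)
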
    def make_membership_event(self, destinations, room_id, user_id, membership,
                              content={},):
                              content, params):
        """
        Creates an m.room.member event, with context, without participating in the room.

@@ -475,13 +537,15 @@ class FederationClient(FederationBase):
            user_id (str): The user whose membership is being evented.
            membership (str): The "membership" property of the event. Must be
                one of "join" or "leave".
            content (object): Any additional data to put into the content field
            content (dict): Any additional data to put into the content field
                of the event.
            params (dict[str, str|Iterable[str]]): Query parameters to include in the
                request.
        Return:
            Deferred: resolves to a tuple of (origin (str), event (object))
            where origin is the remote homeserver which generated the event.

            Fails with a ``CodeMessageException`` if the chosen remote server
            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
@@ -492,50 +556,37 @@ class FederationClient(FederationBase):
                "make_membership_event called with membership='%s', must be one of %s" %
                (membership, ",".join(valid_memberships))
            )
        for destination in destinations:
            if destination == self.server_name:
                continue

            try:
                ret = yield self.transport_layer.make_membership_event(
                    destination, room_id, user_id, membership
                )

        @defer.inlineCallbacks
        def send_request(destination):
            ret = yield self.transport_layer.make_membership_event(
                destination, room_id, user_id, membership, params,
            )

                pdu_dict = ret["event"]
            pdu_dict = ret.get("event", None)
            if not isinstance(pdu_dict, dict):
                raise InvalidResponseError("Bad 'event' field in response")

                logger.debug("Got response to make_%s: %s", membership, pdu_dict)
            logger.debug("Got response to make_%s: %s", membership, pdu_dict)

                pdu_dict["content"].update(content)
            pdu_dict["content"].update(content)

                # The protoevent received over the JSON wire may not have all
                # the required fields. Lets just gloss over that because
                # there's some we never care about
                if "prev_state" not in pdu_dict:
                    pdu_dict["prev_state"] = []
            # The protoevent received over the JSON wire may not have all
            # the required fields. Lets just gloss over that because
            # there's some we never care about
            if "prev_state" not in pdu_dict:
                pdu_dict["prev_state"] = []

                ev = builder.EventBuilder(pdu_dict)
            ev = builder.EventBuilder(pdu_dict)

                defer.returnValue(
                    (destination, ev)
                )
                break
            except CodeMessageException as e:
                if not 500 <= e.code < 600:
                    raise
                else:
                    logger.warn(
                        "Failed to make_%s via %s: %s",
                        membership, destination, e.message
                    )
            except Exception as e:
                logger.warn(
                    "Failed to make_%s via %s: %s",
                    membership, destination, e.message
                )
            defer.returnValue(
                (destination, ev)
            )

        raise RuntimeError("Failed to send to any server.")
        return self._try_destination_list(
            "make_" + membership, destinations, send_request,
        )

    @defer.inlineCallbacks
    def send_join(self, destinations, pdu):
        """Sends a join event to one of a list of homeservers.

@@ -552,103 +603,111 @@ class FederationClient(FederationBase):
            giving the server the event was sent to, ``state`` (?) and
            ``auth_chain``.

            Fails with a ``CodeMessageException`` if the chosen remote server
            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
        """

        for destination in destinations:
            if destination == self.server_name:
                continue

            try:
                time_now = self._clock.time_msec()
                _, content = yield self.transport_layer.send_join(
                    destination=destination,
                    room_id=pdu.room_id,
                    event_id=pdu.event_id,
                    content=pdu.get_pdu_json(time_now),
        def check_authchain_validity(signed_auth_chain):
            for e in signed_auth_chain:
                if e.type == EventTypes.Create:
                    create_event = e
                    break
            else:
                raise InvalidResponseError(
                    "no %s in auth chain" % (EventTypes.Create,),
                )

                logger.debug("Got content: %s", content)
            # the room version should be sane.
            room_version = create_event.content.get("room_version", "1")
            if room_version not in KNOWN_ROOM_VERSIONS:
                # This shouldn't be possible, because the remote server should have
                # rejected the join attempt during make_join.
                raise InvalidResponseError(
                    "room appears to have unsupported version %s" % (
                        room_version,
                    ))

                state = [
                    event_from_pdu_json(p, outlier=True)
                    for p in content.get("state", [])
                ]
        @defer.inlineCallbacks
        def send_request(destination):
            time_now = self._clock.time_msec()
            _, content = yield self.transport_layer.send_join(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )

                auth_chain = [
                    event_from_pdu_json(p, outlier=True)
                    for p in content.get("auth_chain", [])
                ]
            logger.debug("Got content: %s", content)

                pdus = {
                    p.event_id: p
                    for p in itertools.chain(state, auth_chain)
                }
            state = [
                event_from_pdu_json(p, outlier=True)
                for p in content.get("state", [])
            ]

                valid_pdus = yield self._check_sigs_and_hash_and_fetch(
                    destination, pdus.values(),
                    outlier=True,
                )
            auth_chain = [
                event_from_pdu_json(p, outlier=True)
                for p in content.get("auth_chain", [])
            ]

                valid_pdus_map = {
                    p.event_id: p
                    for p in valid_pdus
                }
            pdus = {
                p.event_id: p
                for p in itertools.chain(state, auth_chain)
            }

                # NB: We *need* to copy to ensure that we don't have multiple
                # references being passed on, as that causes... issues.
                signed_state = [
                    copy.copy(valid_pdus_map[p.event_id])
                    for p in state
                    if p.event_id in valid_pdus_map
                ]
            valid_pdus = yield self._check_sigs_and_hash_and_fetch(
                destination, list(pdus.values()),
                outlier=True,
            )

                signed_auth = [
                    valid_pdus_map[p.event_id]
                    for p in auth_chain
                    if p.event_id in valid_pdus_map
                ]
            valid_pdus_map = {
                p.event_id: p
                for p in valid_pdus
            }

                # NB: We *need* to copy to ensure that we don't have multiple
                # references being passed on, as that causes... issues.
                for s in signed_state:
                    s.internal_metadata = copy.deepcopy(s.internal_metadata)
            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            signed_state = [
                copy.copy(valid_pdus_map[p.event_id])
                for p in state
                if p.event_id in valid_pdus_map
            ]

                auth_chain.sort(key=lambda e: e.depth)
            signed_auth = [
                valid_pdus_map[p.event_id]
                for p in auth_chain
                if p.event_id in valid_pdus_map
            ]

                defer.returnValue({
                    "state": signed_state,
                    "auth_chain": signed_auth,
                    "origin": destination,
                })
            except CodeMessageException as e:
                if not 500 <= e.code < 600:
                    raise
                else:
                    logger.exception(
                        "Failed to send_join via %s: %s",
                        destination, e.message
                    )
            except Exception as e:
                logger.exception(
                    "Failed to send_join via %s: %s",
                    destination, e.message
                )
            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            for s in signed_state:
                s.internal_metadata = copy.deepcopy(s.internal_metadata)

        raise RuntimeError("Failed to send to any server.")
            check_authchain_validity(signed_auth)

            defer.returnValue({
                "state": signed_state,
                "auth_chain": signed_auth,
                "origin": destination,
            })
        return self._try_destination_list("send_join", destinations, send_request)

    @defer.inlineCallbacks
    def send_invite(self, destination, room_id, event_id, pdu):
        time_now = self._clock.time_msec()
        code, content = yield self.transport_layer.send_invite(
            destination=destination,
            room_id=room_id,
            event_id=event_id,
            content=pdu.get_pdu_json(time_now),
        )
        try:
            code, content = yield self.transport_layer.send_invite(
                destination=destination,
                room_id=room_id,
                event_id=event_id,
                content=pdu.get_pdu_json(time_now),
            )
        except HttpResponseException as e:
            if e.code == 403:
                raise e.to_synapse_error()
            raise

        pdu_dict = content["event"]

@@ -663,7 +722,6 @@ class FederationClient(FederationBase):

        defer.returnValue(pdu)

    @defer.inlineCallbacks
    def send_leave(self, destinations, pdu):
        """Sends a leave event to one of a list of homeservers.

@@ -680,35 +738,25 @@ class FederationClient(FederationBase):
        Return:
            Deferred: resolves to None.

            Fails with a ``CodeMessageException`` if the chosen remote server
            returns a non-200 code.
            Fails with a ``SynapseError`` if the chosen remote server
            returns a 300/400 code.

            Fails with a ``RuntimeError`` if no servers were reachable.
        """
        for destination in destinations:
            if destination == self.server_name:
                continue
        @defer.inlineCallbacks
        def send_request(destination):
            time_now = self._clock.time_msec()
            _, content = yield self.transport_layer.send_leave(
                destination=destination,
                room_id=pdu.room_id,
                event_id=pdu.event_id,
                content=pdu.get_pdu_json(time_now),
            )

            try:
                time_now = self._clock.time_msec()
                _, content = yield self.transport_layer.send_leave(
                    destination=destination,
                    room_id=pdu.room_id,
                    event_id=pdu.event_id,
                    content=pdu.get_pdu_json(time_now),
                )
            logger.debug("Got content: %s", content)
            defer.returnValue(None)

                logger.debug("Got content: %s", content)
                defer.returnValue(None)
            except CodeMessageException:
                raise
            except Exception as e:
                logger.exception(
                    "Failed to send_leave via %s: %s",
                    destination, e.message
                )

        raise RuntimeError("Failed to send to any server.")
        return self._try_destination_list("send_leave", destinations, send_request)

    def get_public_rooms(self, destination, limit=None, since_token=None,
                         search_filter=None, include_all_networks=False,

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,24 +14,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re

import six
from six import iteritems

from canonicaljson import json
from prometheus_client import Counter

import simplejson as json
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

from synapse.api.errors import AuthError, FederationError, SynapseError, NotFoundError
from synapse.crypto.event_signing import compute_event_signature
from synapse.federation.federation_base import (
    FederationBase,
    event_from_pdu_json,
from synapse.api.constants import EventTypes
from synapse.api.errors import (
    AuthError,
    FederationError,
    IncompatibleRoomVersionError,
    NotFoundError,
    SynapseError,
)

from synapse.crypto.event_signing import compute_event_signature
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
import synapse.metrics
from synapse.http.endpoint import parse_server_name
from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
from synapse.types import get_domain_from_id
from synapse.util import async
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logutils import log_function

# when processing incoming transactions, we try to handle multiple rooms in
@@ -39,25 +54,25 @@ TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

# synapse.federation.federation_server is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.server")
received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")

received_pdus_counter = metrics.register_counter("received_pdus")
received_edus_counter = Counter("synapse_federation_server_received_edus", "")

received_edus_counter = metrics.register_counter("received_edus")

received_queries_counter = metrics.register_counter("received_queries", labels=["type"])
received_queries_counter = Counter(
    "synapse_federation_server_received_queries", "", ["type"]
)


class FederationServer(FederationBase):

    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler

        self._server_linearizer = async.Linearizer("fed_server")
        self._transaction_linearizer = async.Linearizer("fed_txn_handler")
        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

        self.transaction_actions = TransactionActions(self.store)

@@ -65,12 +80,15 @@ class FederationServer(FederationBase):

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)
        self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            pdus = yield self.handler.on_backfill_request(
                origin, room_id, versions, limit
            )
@@ -129,7 +147,9 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        received_pdus_counter.inc_by(len(transaction.pdus))
        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(transaction.origin)

        pdus_by_room = {}

@@ -151,9 +171,21 @@ class FederationServer(FederationBase):
        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                yield self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warn(
                    "Ignoring PDUs for room %s from banned server", room_id,
                )
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                try:
@@ -165,10 +197,14 @@ class FederationServer(FederationBase):
                    logger.warn("Error handling PDU %s: %s", event_id, e)
                    pdu_results[event_id] = {"error": str(e)}
                except Exception as e:
                    f = failure.Failure()
                    pdu_results[event_id] = {"error": str(e)}
                    logger.exception("Failed to handle PDU %s", event_id)
                    logger.error(
                        "Failed to handle PDU %s: %s",
                        event_id, f.getTraceback().rstrip(),
                    )

        yield async.concurrently_execute(
        yield concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(),
            TRANSACTION_CONCURRENCY_LIMIT,
        )
@@ -181,10 +217,6 @@ class FederationServer(FederationBase):
                edu.content
            )

        pdu_failures = getattr(transaction, "pdu_failures", [])
        for failure in pdu_failures:
            logger.info("Got failure %r", failure)

        response = {
            "pdus": pdu_results,
        }
@@ -208,20 +240,24 @@ class FederationServer(FederationBase):
        if not event_id:
            raise NotImplementedError("Specify an event")

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        result = self._state_resp_cache.get((room_id, event_id))
        if not result:
            with (yield self._server_linearizer.queue((origin, room_id))):
                d = self._state_resp_cache.set(
                    (room_id, event_id),
                    preserve_fn(self._on_context_state_request_compute)(room_id, event_id)
                )
                resp = yield make_deferred_yieldable(d)
        else:
            resp = yield make_deferred_yieldable(result)
        # we grab the linearizer to protect ourselves from servers which hammer
        # us. In theory we might already have the response to this query
        # in the cache so we could return it without waiting for the linearizer
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (yield self._server_linearizer.queue((origin, room_id))):
            resp = yield self._state_resp_cache.wrap(
                (room_id, event_id),
                self._on_context_state_request_compute,
                room_id, event_id,
            )

        defer.returnValue((200, resp))
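The wrap call collapses the old get/set dance into one step whose job is to deduplicate concurrent identical requests. A simplified sketch of the idea (synapse's actual ResponseCache also handles timeouts and hands each waiter its own copy of the result via an observable deferred):

from twisted.internet import defer

class SimpleResponseCache(object):
    """First caller for a key computes; concurrent callers share the result."""

    def __init__(self):
        self._pending = {}

    def wrap(self, key, callback, *args, **kwargs):
        if key not in self._pending:
            d = defer.maybeDeferred(callback, *args, **kwargs)
            self._pending[key] = d

            def evict(res):
                # once settled, forget the entry so later calls recompute
                self._pending.pop(key, None)
                return res
            d.addBoth(evict)
        return self._pending[key]
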
@@ -230,6 +266,9 @@ class FederationServer(FederationBase):
        if not event_id:
            raise NotImplementedError("Specify an event")

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")
@@ -273,7 +312,7 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    @log_function
    def on_pdu_request(self, origin, event_id):
        pdu = yield self._get_persisted_pdu(origin, event_id)
        pdu = yield self.handler.get_persisted_pdu(origin, event_id)

        if pdu:
            defer.returnValue(
@@ -289,19 +328,32 @@ class FederationServer(FederationBase):

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.inc(query_type)
        received_queries_counter.labels(query_type).inc()
        resp = yield self.registry.on_query(query_type, args)
        defer.returnValue((200, resp))

    @defer.inlineCallbacks
    def on_make_join_request(self, room_id, user_id):
    def on_make_join_request(self, origin, room_id, user_id, supported_versions):
        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)

        room_version = yield self.store.get_room_version(room_id)
        if room_version not in supported_versions:
            logger.warn("Room version %s not in %s", room_version, supported_versions)
            raise IncompatibleRoomVersionError(room_version=room_version)

        pdu = yield self.handler.on_make_join_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
        defer.returnValue({
            "event": pdu.get_pdu_json(time_now),
            "room_version": room_version,
        })
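This is the server half of room-version negotiation: the joining server advertises the versions it supports, and the resident server refuses the make_join if the room's version is not among them. A toy version of the check (the error class and version set below are illustrative):

class IncompatibleRoomVersion(Exception):
    pass

SUPPORTED_VERSIONS = {"1", "2"}  # illustrative; synapse keeps its own list

def check_room_version(room_version, supported_versions):
    if room_version not in supported_versions:
        raise IncompatibleRoomVersion(
            "room is version %s, we support %s"
            % (room_version, ", ".join(sorted(supported_versions)))
        )

check_room_version("1", SUPPORTED_VERSIONS)   # passes
# check_room_version("3", SUPPORTED_VERSIONS) # would raise
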
    @defer.inlineCallbacks
    def on_invite_request(self, origin, content):
        pdu = event_from_pdu_json(content)
        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, pdu.room_id)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))
@@ -310,6 +362,10 @@ class FederationServer(FederationBase):
    def on_send_join_request(self, origin, content):
        logger.debug("on_send_join_request: content: %s", content)
        pdu = event_from_pdu_json(content)

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
@@ -321,7 +377,9 @@ class FederationServer(FederationBase):
        }))

    @defer.inlineCallbacks
    def on_make_leave_request(self, room_id, user_id):
    def on_make_leave_request(self, origin, room_id, user_id):
        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)
        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
@@ -330,6 +388,10 @@ class FederationServer(FederationBase):
    def on_send_leave_request(self, origin, content):
        logger.debug("on_send_leave_request: content: %s", content)
        pdu = event_from_pdu_json(content)

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        yield self.handler.on_send_leave_request(origin, pdu)
        defer.returnValue((200, {}))
@@ -337,6 +399,9 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    def on_event_auth(self, origin, room_id, event_id):
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            time_now = self._clock.time_msec()
            auth_pdus = yield self.handler.on_event_auth(event_id)
            res = {
@@ -365,6 +430,9 @@ class FederationServer(FederationBase):
            Deferred: Results in `dict` with the same format as `content`
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            auth_chain = [
                event_from_pdu_json(e)
                for e in content["auth_chain"]
@@ -377,6 +445,7 @@ class FederationServer(FederationBase):
            ret = yield self.handler.on_query_auth(
                origin,
                event_id,
                room_id,
                signed_auth,
                content.get("rejects", []),
                content.get("missing", []),
@@ -425,9 +494,9 @@ class FederationServer(FederationBase):
            "Claimed one-time-keys: %s",
            ",".join((
                "%s for %s:%s" % (key_id, user_id, device_id)
                for user_id, user_keys in json_result.iteritems()
                for device_id, device_keys in user_keys.iteritems()
                for key_id, _ in device_keys.iteritems()
                for user_id, user_keys in iteritems(json_result)
                for device_id, device_keys in iteritems(user_keys)
                for key_id, _ in iteritems(device_keys)
            )),
        )

@@ -438,6 +507,9 @@ class FederationServer(FederationBase):
    def on_get_missing_events(self, origin, room_id, earliest_events,
                              latest_events, limit, min_depth):
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            logger.info(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d, min_depth: %d",
@@ -466,17 +538,6 @@ class FederationServer(FederationBase):
        ts_now_ms = self._clock.time_msec()
        return self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    @log_function
    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
        """ Get a PDU from the database with given origin and id.

        Returns:
            Deferred: Results in a `Pdu`.
        """
        return self.handler.get_persisted_pdu(
            origin, event_id, do_auth=do_auth
        )
    def _transaction_from_pdus(self, pdu_list):
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
@@ -494,13 +555,33 @@ class FederationServer(FederationBase):
    def _handle_received_pdu(self, origin, pdu):
        """ Process a PDU received in a federation /send/ transaction.

        If the event is invalid, then this method throws a FederationError.
        (The error will then be logged and sent back to the sender (which
        probably won't do anything with it), and other events in the
        transaction will be processed as normal).

        It is likely that we'll then receive other events which refer to
        this rejected_event in their prev_events, etc. When that happens,
        we'll attempt to fetch the rejected event again, which will presumably
        fail, so those second-generation events will also get rejected.

        Eventually, we get to the point where there are more than 10 events
        between any new events and the original rejected event. Since we
        only try to backfill 10 events deep on received pdu, we then accept the
        new event, possibly introducing a discontinuity in the DAG, with new
        forward extremities, so normal service is approximately returned,
        until we try to backfill across the discontinuity.

        Args:
            origin (str): server which sent the pdu
            pdu (FrozenEvent): received pdu

        Returns (Deferred): completes with None
        Raises: FederationError if the signatures / hash do not match
        """

        Raises: FederationError if the signatures / hash do not match, or
            if the event was unacceptable for any other reason (eg, too large,
            too many prev_events, couldn't find the prev_events)
        """
        # check that it's actually being sent from a valid destination to
        # workaround bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.event_id):
@@ -536,7 +617,9 @@ class FederationServer(FederationBase):
                affected=pdu.event_id,
            )

        yield self.handler.on_receive_pdu(origin, pdu, get_missing=True)
        yield self.handler.on_receive_pdu(
            origin, pdu, get_missing=True, sent_to_us_directly=True,
        )

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name
@@ -564,6 +647,101 @@ class FederationServer(FederationBase):
        )
        defer.returnValue(ret)

    @defer.inlineCallbacks
    def check_server_matches_acl(self, server_name, room_id):
        """Check if the given server is allowed by the server ACLs in the room

        Args:
            server_name (str): name of server, *without any port part*
            room_id (str): ID of the room to check

        Raises:
            AuthError if the server does not match the ACL
        """
        state_ids = yield self.store.get_current_state_ids(room_id)
        acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

        if not acl_event_id:
            return

        acl_event = yield self.store.get_event(acl_event_id)
        if server_matches_acl_event(server_name, acl_event):
            return

        raise AuthError(code=403, msg="Server is banned from room")


def server_matches_acl_event(server_name, acl_event):
    """Check if the given server is allowed by the ACL event

    Args:
        server_name (str): name of server, without any port part
        acl_event (EventBase): m.room.server_acl event

    Returns:
        bool: True if this server is allowed by the ACLs
    """
    logger.debug("Checking %s against acl %s", server_name, acl_event.content)

    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        logger.warn("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # check for ipv6 literals. These start with '['.
        if server_name[0] == '[':
            return False

        # check for ipv4 literals. We can just lift the routine from twisted.
        if isIPAddress(server_name):
            return False

    # next, check the deny list
    deny = acl_event.content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warn("Ignoring non-list deny ACL %s", deny)
        deny = []
    for e in deny:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched deny rule %s", server_name, e)
            return False

    # then the allow list.
    allow = acl_event.content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warn("Ignoring non-list allow ACL %s", allow)
        allow = []
    for e in allow:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched allow rule %s", server_name, e)
            return True

    # everything else should be rejected.
    # logger.info("%s fell through", server_name)
    return False


def _acl_entry_matches(server_name, acl_entry):
    if not isinstance(acl_entry, six.string_types):
        logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry))
        return False
    regex = _glob_to_regex(acl_entry)
    return regex.match(server_name)


def _glob_to_regex(glob):
    res = ''
    for c in glob:
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        else:
            res = res + re.escape(c)
    return re.compile(res + "\\Z", re.IGNORECASE)

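A worked example of how the ACL functions above combine; the event content shape follows the m.room.server_acl format used here, and the helper re-implements the glob translation for illustration:

import re

def glob_to_regex(glob):
    # same translation as _glob_to_regex above: '*' -> '.*', '?' -> '.'
    res = ''.join(
        '.*' if c == '*' else '.' if c == '?' else re.escape(c)
        for c in glob
    )
    return re.compile(res + "\\Z", re.IGNORECASE)

acl_content = {
    "allow_ip_literals": False,
    "deny": ["*.evil.example.org"],
    "allow": ["*"],
}

# deny is checked before allow; anything matching neither list is rejected.
assert glob_to_regex("*.evil.example.org").match("hs.evil.example.org")
assert not glob_to_regex("*.evil.example.org").match("matrix.org")
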
class FederationHandlerRegistry(object):
    """Allows classes to register themselves as handlers for a given EDU or
@@ -586,6 +764,8 @@ class FederationHandlerRegistry(object):
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        logger.info("Registering federation EDU handler for %r", edu_type)

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
@@ -604,6 +784,8 @@ class FederationHandlerRegistry(object):
                "Already have a Query handler for %s" % (query_type,)
            )

        logger.info("Registering federation query handler for %r", query_type)

        self.query_handlers[query_type] = handler

    @defer.inlineCallbacks
@@ -626,3 +808,49 @@ class FederationHandlerRegistry(object):
            raise NotFoundError("No handler for Query type '%s'" % (query_type,))

        return handler(args)


class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
    """A FederationHandlerRegistry for worker processes.

    When receiving EDUs or queries it will check if an appropriate handler has
    been registered on the worker; if there isn't one, it calls off to the
    master process.
    """

    def __init__(self, hs):
        self.config = hs.config
        self.http_client = hs.get_simple_http_client()
        self.clock = hs.get_clock()

        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)

        super(ReplicationFederationHandlerRegistry, self).__init__()

    def on_edu(self, edu_type, origin, content):
        """Overrides FederationHandlerRegistry
        """
        handler = self.edu_handlers.get(edu_type)
        if handler:
            return super(ReplicationFederationHandlerRegistry, self).on_edu(
                edu_type, origin, content,
            )

        return self._send_edu(
            edu_type=edu_type,
            origin=origin,
            content=content,
        )

    def on_query(self, query_type, args):
        """Overrides FederationHandlerRegistry
        """
        handler = self.query_handlers.get(query_type)
        if handler:
            return handler(args)

        return self._get_query_client(
            query_type=query_type,
            args=args,
        )

@@ -19,13 +19,12 @@ package.
These actions are mostly only used by the :py:mod:`.replication` module.
"""

import logging

from twisted.internet import defer

from synapse.util.logutils import log_function

import logging


logger = logging.getLogger(__name__)


@@ -29,23 +29,22 @@ dead worker doesn't cause the queues to grow limitlessly.
Events are replicated via a separate events stream.
"""

from .units import Edu

from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure
import synapse.metrics

from blist import sorteddict
import logging
from collections import namedtuple

import logging
from six import iteritems, itervalues

from sortedcontainers import SortedDict

from synapse.metrics import LaterGauge
from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure

from .units import Edu

logger = logging.getLogger(__name__)


metrics = synapse.metrics.get_metrics_for(__name__)


class FederationRemoteSendQueue(object):
    """A drop in replacement for TransactionQueue"""

@@ -56,33 +55,29 @@ class FederationRemoteSendQueue(object):
        self.is_mine_id = hs.is_mine_id

        self.presence_map = {}  # Pending presence map user_id -> UserPresenceState
        self.presence_changed = sorteddict()  # Stream position -> user_id
        self.presence_changed = SortedDict()  # Stream position -> user_id

        self.keyed_edu = {}  # (destination, key) -> EDU
        self.keyed_edu_changed = sorteddict()  # stream position -> (destination, key)
        self.keyed_edu_changed = SortedDict()  # stream position -> (destination, key)

        self.edus = sorteddict()  # stream position -> Edu
        self.edus = SortedDict()  # stream position -> Edu

        self.failures = sorteddict()  # stream position -> (destination, Failure)

        self.device_messages = sorteddict()  # stream position -> destination
        self.device_messages = SortedDict()  # stream position -> destination

        self.pos = 1
        self.pos_time = sorteddict()
        self.pos_time = SortedDict()

        # EVERYTHING IS SAD. In particular, python only makes new scopes when
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
        def register(name, queue):
            metrics.register_callback(
                queue_name + "_size",
                lambda: len(queue),
            )
            LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,),
                       "", [], lambda: len(queue))

        for queue_name in [
            "presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
            "edus", "failures", "device_messages", "pos_time",
            "edus", "device_messages", "pos_time",
        ]:
            register(queue_name, getattr(self, queue_name))

@@ -101,7 +96,7 @@ class FederationRemoteSendQueue(object):
        now = self.clock.time_msec()

        keys = self.pos_time.keys()
        time = keys.bisect_left(now - FIVE_MINUTES_AGO)
        time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
        if not keys[:time]:
            return

@@ -116,13 +111,13 @@ class FederationRemoteSendQueue(object):
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
            keys = self.presence_changed.keys()
            i = keys.bisect_left(position_to_delete)
            i = self.presence_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_changed[key]

            user_ids = set(
                user_id
                for uids in self.presence_changed.itervalues()
                for uids in itervalues(self.presence_changed)
                for user_id in uids
            )

@@ -134,7 +129,7 @@ class FederationRemoteSendQueue(object):

            # Delete things out of keyed edus
            keys = self.keyed_edu_changed.keys()
            i = keys.bisect_left(position_to_delete)
            i = self.keyed_edu_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.keyed_edu_changed[key]

@@ -148,19 +143,13 @@ class FederationRemoteSendQueue(object):

            # Delete things out of edu map
            keys = self.edus.keys()
            i = keys.bisect_left(position_to_delete)
            i = self.edus.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.edus[key]

            # Delete things out of failure map
            keys = self.failures.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.failures[key]

            # Delete things out of device map
            keys = self.device_messages.keys()
            i = keys.bisect_left(position_to_delete)
            i = self.device_messages.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.device_messages[key]
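The bisect rewrites above lean on sortedcontainers: SortedDict exposes bisect_left/bisect_right directly, and its key and item views are sliceable, where blist's sorteddict only offered bisection through .keys(). A minimal demonstration of the deletion pattern:

from sortedcontainers import SortedDict

queue = SortedDict({1: "a", 3: "b", 5: "c", 8: "d"})

# drop everything at stream positions strictly below 5
i = queue.bisect_left(5)
for key in list(queue.keys()[:i]):  # copy the slice: don't mutate while iterating
    del queue[key]

assert list(queue.items()) == [(5, "c"), (8, "d")]
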
@@ -200,20 +189,13 @@ class FederationRemoteSendQueue(object):

        # We only want to send presence for our own users, so let's always just
        # filter here just in case.
        local_states = filter(lambda s: self.is_mine_id(s.user_id), states)
        local_states = list(filter(lambda s: self.is_mine_id(s.user_id), states))

        self.presence_map.update({state.user_id: state for state in local_states})
        self.presence_changed[pos] = [state.user_id for state in local_states]

        self.notifier.on_new_replication_data()

    def send_failure(self, failure, destination):
        """As per TransactionQueue"""
        pos = self._next_pos()

        self.failures[pos] = (destination, str(failure))
        self.notifier.on_new_replication_data()

    def send_device_messages(self, destination):
        """As per TransactionQueue"""
        pos = self._next_pos()
@@ -253,13 +235,12 @@ class FederationRemoteSendQueue(object):
        self._clear_queue_before_pos(federation_ack)

        # Fetch changed presence
        keys = self.presence_changed.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        i = self.presence_changed.bisect_right(from_token)
        j = self.presence_changed.bisect_right(to_token) + 1
        dest_user_ids = [
            (pos, user_id)
            for pos in keys[i:j]
            for user_id in self.presence_changed[pos]
            for pos, user_id_list in self.presence_changed.items()[i:j]
            for user_id in user_id_list
        ]

        for (key, user_id) in dest_user_ids:
@@ -268,48 +249,33 @@ class FederationRemoteSendQueue(object):
            )))

        # Fetch changed keyed edus
        keys = self.keyed_edu_changed.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        i = self.keyed_edu_changed.bisect_right(from_token)
        j = self.keyed_edu_changed.bisect_right(to_token) + 1
        # We purposefully clobber based on the key here, python dict comprehensions
        # always use the last value, so this will correctly point to the last
        # stream position.
        keyed_edus = {self.keyed_edu_changed[k]: k for k in keys[i:j]}
        keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}

        for ((destination, edu_key), pos) in keyed_edus.iteritems():
        for ((destination, edu_key), pos) in iteritems(keyed_edus):
            rows.append((pos, KeyedEduRow(
                key=edu_key,
                edu=self.keyed_edu[(destination, edu_key)],
            )))

        # Fetch changed edus
        keys = self.edus.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        edus = ((k, self.edus[k]) for k in keys[i:j])
        i = self.edus.bisect_right(from_token)
        j = self.edus.bisect_right(to_token) + 1
        edus = self.edus.items()[i:j]

        for (pos, edu) in edus:
            rows.append((pos, EduRow(edu)))

        # Fetch changed failures
        keys = self.failures.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        failures = ((k, self.failures[k]) for k in keys[i:j])

        for (pos, (destination, failure)) in failures:
            rows.append((pos, FailureRow(
                destination=destination,
                failure=failure,
            )))

        # Fetch changed device messages
        keys = self.device_messages.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        device_messages = {self.device_messages[k]: k for k in keys[i:j]}
        i = self.device_messages.bisect_right(from_token)
        j = self.device_messages.bisect_right(to_token) + 1
        device_messages = {v: k for k, v in self.device_messages.items()[i:j]}

        for (destination, pos) in device_messages.iteritems():
        for (destination, pos) in iteritems(device_messages):
            rows.append((pos, DeviceRow(
                destination=destination,
            )))
@@ -425,34 +391,6 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", (
        buff.edus.setdefault(self.edu.destination, []).append(self.edu)


class FailureRow(BaseFederationRow, namedtuple("FailureRow", (
    "destination",  # str
    "failure",
))):
    """Streams failures to a remote server. Failures are issued when there was
    something wrong with a transaction the remote sent us, e.g. it included
    an event that was invalid.
    """

    TypeId = "f"

    @staticmethod
    def from_data(data):
        return FailureRow(
            destination=data["destination"],
            failure=data["failure"],
        )

    def to_data(self):
        return {
            "destination": self.destination,
            "failure": self.failure,
        }

    def add_to_buffer(self, buff):
        buff.failures.setdefault(self.destination, []).append(self.failure)


class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", (
    "destination",  # str
))):
@@ -479,7 +417,6 @@ TypeToRow = {
        PresenceRow,
        KeyedEduRow,
        EduRow,
        FailureRow,
        DeviceRow,
    )
}
@@ -489,7 +426,6 @@ ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
    "presence",  # list(UserPresenceState)
    "keyed_edus",  # dict of destination -> { key -> Edu }
    "edus",  # dict of destination -> [Edu]
    "failures",  # dict of destination -> [failures]
    "device_destinations",  # set of destinations
))

@@ -511,7 +447,6 @@ def process_rows_for_federation(transaction_queue, rows):
        presence=[],
        keyed_edus={},
        edus={},
        failures={},
        device_destinations=set(),
    )

@@ -528,21 +463,17 @@ def process_rows_for_federation(transaction_queue, rows):
    if buff.presence:
        transaction_queue.send_presence(buff.presence)

    for destination, edu_map in buff.keyed_edus.iteritems():
    for destination, edu_map in iteritems(buff.keyed_edus):
        for key, edu in edu_map.items():
            transaction_queue.send_edu(
                edu.destination, edu.edu_type, edu.content, key=key,
            )

    for destination, edu_list in buff.edus.iteritems():
    for destination, edu_list in iteritems(buff.edus):
        for edu in edu_list:
            transaction_queue.send_edu(
                edu.destination, edu.edu_type, edu.content, key=None,
            )

    for destination, failure_list in buff.failures.iteritems():
        for failure in failure_list:
            transaction_queue.send_failure(destination, failure)

    for destination in buff.device_destinations:
        transaction_queue.send_device_messages(destination)
Some files were not shown because too many files have changed in this diff.