Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-15 02:00:21 +00:00

Compare commits: matrix-org...erikj/chun (2445 commits)
[Commit list omitted: 2445 commits, from bdba57edf1 through 20746d8150; the author, message, and date columns are empty in this capture.]
.dockerignore | 5 (new file)

@@ -0,0 +1,5 @@
+Dockerfile
+.travis.yml
+.gitignore
+demo/etc
+tox.ini
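The ignore file takes effect when an image is built from the repository root; a minimal sketch, assuming the Dockerfile added in this changeset sits at the root, with an arbitrary image tag:

# Docker reads .dockerignore when assembling the build context, so the five
# paths listed above are never sent to the daemon.
# "synapse" is an arbitrary tag chosen for this example.
docker build -t synapse .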
.github/ISSUE_TEMPLATE.md | 47 (new file, vendored)

@@ -0,0 +1,47 @@
+<!--
+
+**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
+You will likely get better support more quickly if you ask in **#matrix:matrix.org** ;)
+
+
+This is a bug report template. By following the instructions below and
+filling out the sections with your information, you will help us to get all
+the necessary data to fix your issue.
+
+You can also preview your report before submitting it. You may remove sections
+that aren't relevant to your particular case.
+
+Text between <!-- and --> marks will be invisible in the report.
+
+-->
+
+### Description
+
+Describe here the problem that you are experiencing, or the feature you are requesting.
+
+### Steps to reproduce
+
+- For bugs, list the steps
+- that reproduce the bug
+- using hyphens as bullet points
+
+Describe how what happens differs from what you expected.
+
+If you can identify any relevant log snippets from _homeserver.log_, please include
+those here (please be careful to remove any personal or private data):
+
+### Version information
+
+<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
+
+- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
+
+If not matrix.org:
+- **Version**: What version of Synapse is running? <!--
+You can find the Synapse version by inspecting the server headers (replace matrix.org with
+your own homeserver domain):
+$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
+-->
+- **Install method**: package manager/git clone/pip
+- **Platform**: Tell us about the environment in which your homeserver is operating
+  - distro, hardware, if it's running in a vm/container, etc.
.gitignore | 4 (vendored)

@@ -32,6 +32,7 @@ demo/media_store.*
 demo/etc
 
 uploads
+cache
 
 .idea/
 media_store/
@@ -46,3 +47,6 @@ static/client/register/register_config.js
 
 env/
 *.config
+
+.vscode/
+.ropeproject/
.travis.yml | 18

@@ -1,14 +1,22 @@
 sudo: false
 language: python
-python: 2.7
 
 # tell travis to cache ~/.cache/pip
 cache: pip
 
-env:
- - TOX_ENV=packaging
- - TOX_ENV=pep8
- - TOX_ENV=py27
+matrix:
+  include:
+    - python: 2.7
+      env: TOX_ENV=packaging
+
+    - python: 2.7
+      env: TOX_ENV=pep8
+
+    - python: 2.7
+      env: TOX_ENV=py27
+
+    - python: 3.6
+      env: TOX_ENV=py36
 
 install:
  - pip install tox
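Each cell of the new Travis matrix maps onto one tox environment, so a single CI job can be reproduced locally; a minimal sketch, assuming a checkout of the repository root and that pip is available:

# Mirror one cell of the Travis matrix: install tox, then run the py27
# environment. Any TOX_ENV value from the matrix (packaging, pep8, py36)
# can be substituted for py27.
pip install tox
tox -e py27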
AUTHORS.rst | 3

@@ -60,3 +60,6 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>
 
 Christoph Witzany <christoph at web.crofting.com>
  * Add LDAP support for authentication
+
+Pierre Jaury <pierre at jaury.eu>
+ * Docker packaging
CHANGES.rst | 813

@@ -1,3 +1,816 @@
[The hunk below is entirely new text prepended to the changelog; '+' markers are omitted for readability.]

Changes in <unreleased>
=======================

This release adds an index to the events table. This means that on first
startup there will be an increased amount of IO until the index is created, and
an increase in disk usage.

Changes in synapse v0.30.0 (2018-05-24)
=======================================

'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.

They are used as part of communicating the server policies (see
``docs/consent_tracking.md``); however, the intention is that they may also
find a use for features such as "Message of the day".

This feature is specific to Synapse, but uses standard Matrix communication
mechanisms, so should work with any Matrix client. For more details see
``docs/server_notices.md``.

Further Server Notices/Consent Tracking Support:

* Allow overriding the server_notices user's avatar (PR #3273)
* Use the localpart in the consent uri (PR #3272)
* Support for putting %(consent_uri)s in messages (PR #3271)
* Block attempts to send server notices to remote users (PR #3270)
* Docs on consent bits (PR #3268)

Changes in synapse v0.30.0-rc1 (2018-05-23)
===========================================

Server Notices/Consent Tracking Support:

* ConsentResource to gather policy consent from users (PR #3213)
* Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225)
* Infrastructure for a server notices room (PR #3232)
* Send users a server notice about consent (PR #3236)
* Reject attempts to send event before privacy consent is given (PR #3257)
* Add a 'has_consented' template var to consent forms (PR #3262)
* Fix dependency on jinja2 (PR #3263)

Features:

* Cohort analytics (PR #3163, #3241, #3251)
* Add lxml to docker image for web previews (PR #3239) Thanks to @ptman!
* Add in-flight request metrics (PR #3252)

Changes:

* Remove unused `update_external_syncs` (PR #3233)
* Use stream rather than depth ordering for push actions (PR #3212)
* Make purge_history operate on tokens (PR #3221)
* Don't support limitless pagination (PR #3265)

Bug Fixes:

* Fix logcontext resource usage tracking (PR #3258)
* Fix error in handling receipts (PR #3235)
* Stop the transaction cache caching failures (PR #3255)

Changes in synapse v0.29.1 (2018-05-17)
=======================================

Changes:

* Update docker documentation (PR #3222)

Changes in synapse v0.29.0 (2018-05-16)
=======================================

No changes since v0.29.0-rc1.

Changes in synapse v0.29.0-rc1 (2018-05-14)
===========================================

Notable changes: a Dockerfile for running Synapse (thanks to @kaiyou!) and a
fix for a spec-compliance bug in the Client-Server API, along with further
preparation for the Python 3 migration.

Potentially breaking change:

* Make Client-Server API return 401 for invalid token (PR #3161).

  This changes the Client-Server spec to return a 401 error code instead of 403
  when the access token is unrecognised. This is the behaviour required by the
  specification, but some clients may be relying on the old, incorrect
  behaviour.

  Thanks to @NotAFile for fixing this.
|
|
||||||
|
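As an illustration of the new behaviour, an unrecognised access token should
now yield a 401 rather than a 403. A minimal sketch (assumes the ``requests``
library and a locally running homeserver; the exact error body may vary):

.. code:: python

    import requests

    # Query the Client-Server API with a deliberately bogus token.
    resp = requests.get(
        "https://localhost:8448/_matrix/client/r0/account/whoami",
        params={"access_token": "not-a-real-token"},
        verify=False,  # port 8448 uses a self-signed certificate by default
    )

    assert resp.status_code == 401  # was 403 before this release
    print(resp.json()["errcode"])   # typically "M_UNKNOWN_TOKEN"
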
Features:

* Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou!

Changes - General:

* nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
* Part user from rooms on account deactivate (PR #3201)
* Make 'unexpected logging context' into warnings (PR #3007)
* Set Server header in SynapseRequest (PR #3208)
* remove duplicates from groups tables (PR #3129)
* Improve exception handling for background processes (PR #3138)
* Add missing consumeErrors to improve exception handling (PR #3139)
* reraise exceptions more carefully (PR #3142)
* Remove redundant call to preserve_fn (PR #3143)
* Trap exceptions thrown within run_in_background (PR #3144)

Changes - Refactors:

* Refactor /context to reuse pagination storage functions (PR #3193)
* Refactor recent events func to use pagination func (PR #3195)
* Refactor pagination DB API to return concrete type (PR #3196)
* Refactor get_recent_events_for_room return type (PR #3198)
* Refactor sync APIs to reuse pagination API (PR #3199)
* Remove unused code path from member change DB func (PR #3200)
* Refactor request handling wrappers (PR #3203)
* transaction_id, destination defined twice (PR #3209) Thanks to @damir-manapov!
* Refactor event storage to prepare for changes in state calculations (PR #3141)
* Set Server header in SynapseRequest (PR #3208)
* Use deferred.addTimeout instead of time_bound_deferred (PR #3127, #3178)
* Use run_in_background in preference to preserve_fn (PR #3140)

Changes - Python 3 migration:

* Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile!
* run config tests on py3 (PR #3159) Thanks to @NotAFile!
* Open certificate files as bytes (PR #3084) Thanks to @NotAFile!
* Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile!
* Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
* Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
* Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
* Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile!
* Move more xrange to six (PR #3151) Thanks to @NotAFile!
* make imports local (PR #3152) Thanks to @NotAFile!
* move httplib import to six (PR #3153) Thanks to @NotAFile!
* Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile!
* more bytes strings (PR #3155) Thanks to @NotAFile!

Bug Fixes:

* synapse fails to start under Twisted >= 18.4 (PR #3157)
* Fix a class of logcontext leaks (PR #3170)
* Fix a couple of logcontext leaks in unit tests (PR #3172)
* Fix logcontext leak in media repo (PR #3174)
* Escape label values in prometheus metrics (PR #3175, #3186)
* Fix 'Unhandled Error' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
* Fix logcontext leaks in rate limiter (PR #3183)
* notifications: Convert next_token to string according to the spec (PR #3190) Thanks to @mujx!
* nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
* add guard for None on purge_history api (PR #3160) Thanks to @krombel!


Changes in synapse v0.28.1 (2018-05-01)
=======================================

SECURITY UPDATE

* Clamp the allowed values of event depth received over federation to be
  [0, 2^63 - 1]. This mitigates an attack where malicious events
  injected with depth = 2^63 - 1 render rooms unusable. Depth is used to
  determine the cosmetic ordering of events within a room, and so the ordering
  of events in such a room will default to using stream_ordering rather than
  depth (topological_ordering). A sketch of the clamping appears after this
  list.

  This is a temporary solution to mitigate abuse in the wild, whilst a long
  term solution is being implemented to improve how the depth parameter is
  used.

  Full details at
  https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI

* Pin Twisted to <18.4 until we stop using the private _OpenSSLECCurve API.

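As an illustration of the mitigation above (a minimal sketch, not Synapse's
actual code; the helper name is hypothetical):

.. code:: python

    # Clamp an incoming federation event's depth into [0, 2^63 - 1], as
    # described in the security note above.
    MAX_DEPTH = 2 ** 63 - 1

    def clamp_depth(depth):
        """Return the event depth clamped to the allowed range."""
        return max(0, min(int(depth), MAX_DEPTH))

    assert clamp_depth(2 ** 63 - 1) == MAX_DEPTH  # the reported attack value
    assert clamp_depth(-5) == 0

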
Changes in synapse v0.28.0 (2018-04-26)
=======================================

Bug Fixes:

* Fix quarantine media admin API and search reindex (PR #3130)
* Fix media admin APIs (PR #3134)


Changes in synapse v0.28.0-rc1 (2018-04-24)
===========================================

Minor performance improvements to federation sending, plus bug fixes.

(Note: this release does not include the delta state resolution implementation
discussed in Matrix Live.)

Features:

* Add metrics for event processing lag (PR #3090)
* Add metrics for ResponseCache (PR #3092)

Changes:

* Synapse on PyPy (PR #2760) Thanks to @Valodim!
* move handling of auto_join_rooms to RegisterHandler (PR #2996) Thanks to @krombel!
* Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
* Document the behaviour of ResponseCache (PR #3059)
* Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
* update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel!
* use python3-compatible prints (PR #3074) Thanks to @NotAFile!
* Send federation events concurrently (PR #3078)
* Limit concurrent event sends for a room (PR #3079)
* Improve R30 stat definition (PR #3086)
* Send events to ASes concurrently (PR #3088)
* Refactor ResponseCache usage (PR #3093)
* Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
* Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
* Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
* Refactor store.have_events (PR #3117)

Bug Fixes:

* Return 401 for invalid access_token on logout (PR #2938) Thanks to @dklug!
* Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
* fix federation_domain_whitelist (PR #3099)
* Avoid creating events with huge numbers of prev_events (PR #3113)
* Reject events which have lots of prev_events (PR #3118)


Changes in synapse v0.27.4 (2018-04-13)
=======================================

Changes:

* Update canonicaljson dependency (#3095)


Changes in synapse v0.27.3 (2018-04-11)
=======================================

Bug fixes:

* URL quote path segments over federation (#3082)


Changes in synapse v0.27.3-rc2 (2018-04-09)
===========================================

v0.27.3-rc1 used a stale version of the develop branch, so the changelog
overstates the functionality. v0.27.3-rc2 is up to date; rc1 should be ignored.


Changes in synapse v0.27.3-rc1 (2018-04-09)
===========================================

Notable changes include API support for joinability of groups, as well as new
metrics and phone home stats. Phone home stats include better visibility of
system usage so we can tweak synapse to work better for all users rather than
our own experience with matrix.org. We also now record the 'r30' stat, which is
the measure we use to track overall growth of the Matrix ecosystem. It is
defined as follows (a sketch in code follows the list):

Counts the number of native 30 day retained users, defined as:

* Users who have created their accounts more than 30 days ago
* Where last seen at most 30 days ago
* Where account creation and last_seen are > 30 days apart

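A minimal sketch of that definition (hypothetical helper, not Synapse's actual
phone-home code; timestamps are in milliseconds):

.. code:: python

    import time

    THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000

    def is_r30_retained(creation_ts_ms, last_seen_ts_ms, now_ms=None):
        """Check whether a user counts towards the 'r30' stat defined above."""
        if now_ms is None:
            now_ms = int(time.time() * 1000)
        created_over_30d_ago = (now_ms - creation_ts_ms) > THIRTY_DAYS_MS
        seen_within_30d = (now_ms - last_seen_ts_ms) <= THIRTY_DAYS_MS
        used_for_over_30d = (last_seen_ts_ms - creation_ts_ms) > THIRTY_DAYS_MS
        return created_over_30d_ago and seen_within_30d and used_for_over_30d

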
Features:

* Add joinability for groups (PR #3045)
* Implement group join API (PR #3046)
* Add counter metrics for calculating state delta (PR #3033)
* R30 stats (PR #3041)
* Measure time it takes to calculate state group ID (PR #3043)
* Add basic performance statistics to phone home (PR #3044)
* Add response size metrics (PR #3071)
* phone home cache size configurations (PR #3063)

Changes:

* Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
* Replace old style error catching with 'as' keyword (PR #3000) Thanks to @NotAFile!
* Use .iter* to avoid copies in StateHandler (PR #3006)
* Linearize calls to _generate_user_id (PR #3029)
* Remove last usage of ujson (PR #3030)
* Use simplejson throughout (PR #3048)
* Use static JSONEncoders (PR #3049)
* Remove uses of events.content (PR #3060)
* Improve database cache performance (PR #3068)

Bug fixes:

* Add room_id to the response of `rooms/{roomId}/join` (PR #2986) Thanks to @jplatte!
* Fix replication after switch to simplejson (PR #3015)
* 404 correctly on missing paths via NoResource (PR #3022)
* Fix error when claiming e2e keys from offline servers (PR #3034)
* fix tests/storage/test_user_directory.py (PR #3042)
* use PUT instead of POST for federating groups/m.join_policy (PR #3070) Thanks to @krombel!
* postgres port script: fix state_groups_pkey error (PR #3072)


Changes in synapse v0.27.2 (2018-03-26)
=======================================

Bug fixes:

* Fix bug which broke TCP replication between workers (PR #3015)


Changes in synapse v0.27.1 (2018-03-26)
=======================================

Meta release, as v0.27.0 temporarily pointed to the wrong commit.


Changes in synapse v0.27.0 (2018-03-26)
=======================================

No changes since v0.27.0-rc2


Changes in synapse v0.27.0-rc2 (2018-03-19)
===========================================

Pulls in v0.26.1

Bug fixes:

* Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)


Changes in synapse v0.26.1 (2018-03-15)
=======================================

Bug fixes:

* Fix bug where an invalid event caused the server to stop functioning
  correctly, due to parsing and serializing bugs in the ujson library (PR #3008)


Changes in synapse v0.27.0-rc1 (2018-03-14)
===========================================

The common case for running Synapse is not to run separate workers, but for
those that do, be aware that synctl no longer starts the main synapse process
when using the ``-a`` option with workers. A new worker file should be added
with ``worker_app: synapse.app.homeserver`` (see the sketch below).

This release also begins the process of renaming a number of the metrics
reported to prometheus. See `docs/metrics-howto.rst <docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0>`_.
Note that the v0.28.0 release will remove the deprecated metric names.

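A minimal sketch of such a worker file (the filename is hypothetical; only the
``worker_app`` line is mandated by the note above)::

    # conf/main.worker.yaml
    worker_app: synapse.app.homeserver
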
Features:

* Add ability for ASes to override message send time (PR #2754)
* Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
* Add purge API features, see `docs/admin_api/purge_history_api.rst <docs/admin_api/purge_history_api.rst>`_ for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
* Add support for whitelisting 3PIDs that users can register. (PR #2813)
* Add ``/room/{id}/event/{id}`` API (PR #2766)
* Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
* Add ``federation_domain_whitelist`` option (PR #2820, #2821)

Changes:

* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
* Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
* Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
* No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
* Remove ``verbosity``/``log_file`` from generated config (PR #2755)
* Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
* When using synctl with workers, don't start the main synapse automatically (PR #2774)
* Minor performance improvements (PR #2773, #2792)
* Use a connection pool for non-federation outbound connections (PR #2817)
* Make it possible to run unit tests against postgres (PR #2829)
* Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
* Remove ability for AS users to call /events and /sync (PR #2948)
* Use bcrypt.checkpw (PR #2949) Thanks to @krombel!

Bug fixes:

* Fix broken ``ldap_config`` config option (PR #2683) Thanks to @seckrv!
* Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
* Fix publicised groups GET API (singular) over federation (PR #2772)
* Fix user directory when using ``user_directory_search_all_users`` config option (PR #2803, #2831)
* Fix error on ``/publicRooms`` when no rooms exist (PR #2827)
* Fix bug in quarantine_media (PR #2837)
* Fix url_previews when no Content-Type is returned from URL (PR #2845)
* Fix rare race in sync API when joining room (PR #2944)
* Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)


Changes in synapse v0.26.0 (2018-01-05)
=======================================

No changes since v0.26.0-rc1


Changes in synapse v0.26.0-rc1 (2017-12-13)
===========================================

Features:

* Add ability for ASes to publicise groups for their users (PR #2686)
* Add all local users to the user_directory and optionally search them (PR #2723)
* Add support for custom login types for validating users (PR #2729)

Changes:

* Update example Prometheus config to new format (PR #2648) Thanks to @krombel!
* Rename redact_content option to include_content in Push API (PR #2650)
* Declare support for r0.3.0 (PR #2677)
* Improve upserts (PR #2684, #2688, #2689, #2713)
* Improve documentation of workers (PR #2700)
* Improve tracebacks on exceptions (PR #2705)
* Allow guest access to group APIs for reading (PR #2715)
* Support for posting content in federation_client script (PR #2716)
* Delete devices and pushers on logouts etc (PR #2722)

Bug fixes:

* Fix database port script (PR #2673)
* Fix internal server error on login with ldap_auth_provider (PR #2678) Thanks to @jkolo!
* Fix error on sqlite 3.7 (PR #2697)
* Fix OPTIONS on preview_url (PR #2707)
* Fix error handling on dns lookup (PR #2711)
* Fix wrong avatars when inviting multiple users when creating room (PR #2717)
* Fix 500 when joining matrix-dev (PR #2719)


Changes in synapse v0.25.1 (2017-11-17)
=======================================

Bug fixes:

* Fix login with LDAP and other password provider modules (PR #2678). Thanks to @jkolo!


Changes in synapse v0.25.0 (2017-11-15)
=======================================

Bug fixes:

* Fix port script (PR #2673)


Changes in synapse v0.25.0-rc1 (2017-11-14)
===========================================

Features:

* Add is_public to groups table to allow for private groups (PR #2582)
* Add a route for determining who you are (PR #2668) Thanks to @turt2live!
* Add more features to the password providers (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629)
* Add a hook for custom rest endpoints (PR #2627)
* Add API to update group room visibility (PR #2651)

Changes:

* Ignore <noscript> tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt!
* Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel!
* Support /keys/upload on /r0 as well as /unstable (PR #2585)
* Front-end proxy: pass through auth header (PR #2586)
* Allow ASes to deactivate their own users (PR #2589)
* Remove refresh tokens (PR #2613)
* Automatically set default displayname on register (PR #2617)
* Log login requests (PR #2618)
* Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630)
* Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
* Fix various embarrassing typos around user_directory and add some doc. (PR #2643)
* Return whether a user is an admin within a group (PR #2647)
* Namespace visibility options for groups (PR #2657)
* Downcase UserIDs on registration (PR #2662)
* Cache failures when fetching URL previews (PR #2669)

Bug fixes:

* Fix port script (PR #2577)
* Fix error when running synapse with no logfile (PR #2581)
* Fix UI auth when deleting devices (PR #2591)
* Fix typo when checking if user is invited to group (PR #2599)
* Fix the port script to drop NUL values in all tables (PR #2611)
* Fix appservices being backlogged and not receiving new events due to a bug in notify_interested_services (PR #2631) Thanks to @xyzz!
* Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima!
* Fix bug in state group storage (PR #2649)
* Fix 500 on invalid utf-8 in request (PR #2663)


Changes in synapse v0.24.1 (2017-10-24)
=======================================

Bug fixes:

* Fix updating group profiles over federation (PR #2567)


Changes in synapse v0.24.0 (2017-10-23)
=======================================

No changes since v0.24.0-rc1


Changes in synapse v0.24.0-rc1 (2017-10-19)
===========================================

Features:

* Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426, #2430, #2454, #2471, #2472, #2544)
* Add support for channel notifications (PR #2501)
* Add basic implementation of backup media store (PR #2538)
* Add config option to auto-join new users to rooms (PR #2545) - see the sketch after this list

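For the auto-join option above, a minimal sketch of the corresponding
``homeserver.yaml`` entry (the room alias is illustrative)::

    auto_join_rooms:
      - "#welcome:example.com"
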
Changes:

* Make the spam checker a module (PR #2474)
* Delete expired url cache data (PR #2478)
* Ignore incoming events for rooms that we have left (PR #2490)
* Allow spam checker to reject invites too (PR #2492)
* Add room creation checks to spam checker (PR #2495)
* Spam checking: add the invitee to user_may_invite (PR #2502)
* Process events from federation for different rooms in parallel (PR #2520)
* Allow error strings from spam checker (PR #2531)
* Improve error handling for missing files in config (PR #2551)

Bug fixes:

* Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477)
* Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline!
* Fix notification keywords that start/end with non-word chars (PR #2500)
* Fix stack overflow and logcontexts from linearizer (PR #2532)
* Fix 500 error when fields missing from power_levels event (PR #2552)
* Fix 500 error when we get an error handling a PDU (PR #2553)


Changes in synapse v0.23.1 (2017-10-02)
=======================================

Changes:

* Make 'affinity' package optional, as it is not supported on some platforms


Changes in synapse v0.23.0 (2017-10-02)
=======================================

No changes since v0.23.0-rc2


Changes in synapse v0.23.0-rc2 (2017-09-26)
===========================================

Bug fixes:

* Fix regression in performance of syncs (PR #2470)


Changes in synapse v0.23.0-rc1 (2017-09-25)
===========================================

Features:

* Add a frontend proxy worker (PR #2344)
* Add support for event_id_only push format (PR #2450)
* Add a PoC for filtering spammy events (PR #2456)
* Add a config option to block all room invites (PR #2457)

Changes:

* Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias!
* Improve performance of generating push notifications (PR #2343, #2357, #2365, #2366, #2371)
* Improve DB performance for device list handling in sync (PR #2362)
* Include a sample prometheus config (PR #2416)
* Document known to work postgres version (PR #2433) Thanks to @ptman!

Bug fixes:

* Fix caching error in the push evaluator (PR #2332)
* Fix bug where pusherpool didn't start and broke some rooms (PR #2342)
* Fix port script for user directory tables (PR #2375)
* Fix device lists notifications when user rejoins a room (PR #2443, #2449)
* Fix sync to always send down current state events in timeline (PR #2451)
* Fix bug where guest users were incorrectly kicked (PR #2453)
* Fix bug talking to IPv6 only servers using SRV records (PR #2462)


Changes in synapse v0.22.1 (2017-07-06)
=======================================

Bug fixes:

* Fix bug where pusher pool didn't start and caused issues when
  interacting with some rooms (PR #2342)


Changes in synapse v0.22.0 (2017-07-06)
=======================================

No changes since v0.22.0-rc2


Changes in synapse v0.22.0-rc2 (2017-07-04)
===========================================

Changes:

* Improve performance of storing user IPs (PR #2307, #2308)
* Slightly improve performance of verifying access tokens (PR #2320)
* Slightly improve performance of event persistence (PR #2321)
* Increase default cache factor size from 0.1 to 0.5 (PR #2330)

Bug fixes:

* Fix bug with storing registration sessions that caused frequent CPU churn (PR #2319)


Changes in synapse v0.22.0-rc1 (2017-06-26)
===========================================

Features:

* Add a user directory API (PR #2252, and many more)
* Add shutdown room API to remove room from local server (PR #2291)
* Add API to quarantine media (PR #2292)
* Add new config option to not send event contents to push servers (PR #2301) Thanks to @cjdelisle!

Changes:

* Various performance fixes (PR #2177, #2233, #2230, #2238, #2248, #2256, #2274)
* Deduplicate sync filters (PR #2219) Thanks to @krombel!
* Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
* Add count of one time keys to sync stream (PR #2237)
* Only store event_auth for state events (PR #2247)
* Store URL cache preview downloads separately (PR #2299)

Bug fixes:

* Fix users not getting notifications when AS listened to that user_id (PR #2216) Thanks to @slipeer!
* Fix users without push set up not getting notifications after joining rooms (PR #2236)
* Fix preview url API to trim long descriptions (PR #2243)
* Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart (PR #2263)
* Fix removing of pushers when using workers (PR #2267)
* Fix CORS headers to allow Authorization header (PR #2285) Thanks to @krombel!


Changes in synapse v0.21.1 (2017-06-15)
=======================================

Bug fixes:

* Fix bug in anonymous usage statistic reporting (PR #2281)


Changes in synapse v0.21.0 (2017-05-18)
=======================================

No changes since v0.21.0-rc3


Changes in synapse v0.21.0-rc3 (2017-05-17)
===========================================

Features:

* Add per user rate-limiting overrides (PR #2208)
* Add config option to limit maximum number of events requested by ``/sync`` and ``/messages`` (PR #2221) Thanks to @psaavedra!

Changes:

* Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228, #2229)
* Update username availability checker API (PR #2209, #2213)
* When purging, don't de-delta state groups we're about to delete (PR #2214)
* Documentation to check synapse version (PR #2215) Thanks to @hamber-dick!
* Add an index to event_search to speed up purge history API (PR #2218)

Bug fixes:

* Fix API to allow clients to upload one-time-keys with new sigs (PR #2206)


Changes in synapse v0.21.0-rc2 (2017-05-08)
===========================================

Changes:

* Always mark remotes as up if we receive a signed request from them (PR #2190)

Bug fixes:

* Fix bug where users got pushed for rooms they had muted (PR #2200)


Changes in synapse v0.21.0-rc1 (2017-05-08)
===========================================

Features:

* Add username availability checker API (PR #2183)
* Add read marker API (PR #2120)

Changes:

* Enable guest access for the 3pl/3pid APIs (PR #1986)
* Add setting to support TURN for guests (PR #2011)
* Various performance improvements (PR #2075, #2076, #2080, #2083, #2108, #2158, #2176, #2185)
* Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat!
* Replace HTTP replication with TCP replication (PR #2082, #2097, #2098, #2099, #2103, #2014, #2016, #2115, #2116, #2117)
* Support authenticated SMTP (PR #2102) Thanks @DanielDent!
* Add a counter metric for successfully-sent transactions (PR #2121)
* Propagate errors sensibly from proxied IS requests (PR #2147)
* Add more granular event send metrics (PR #2178)

Bug fixes:

* Fix nuke-room script to work with current schema (PR #1927) Thanks @zuckschwerdt!
* Fix db port script to not assume postgres tables are in the public schema (PR #2024) Thanks @jerrykan!
* Fix getting latest device IP for user with no devices (PR #2118)
* Fix rejection of invites to unreachable servers (PR #2145)
* Fix code for reporting old verify keys in synapse (PR #2156)
* Fix invite state to always include all events (PR #2163)
* Fix bug where synapse would always fetch state for any missing event (PR #2170)
* Fix a leak with timed out HTTP connections (PR #2180)
* Fix bug where we didn't time out HTTP requests to ASes (PR #2192)

Docs:

* Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau!
* Fix typo in synctl help (PR #2107) Thanks @HarHarLinks!
* ``web_client_location`` documentation fix (PR #2131) Thanks @matthewjwolff!
* Update README.rst with FreeBSD changes (PR #2132) Thanks @feld!
* Clarify setting up metrics (PR #2149) Thanks @encks!


Changes in synapse v0.20.0 (2017-04-11)
=======================================

Bug fixes:

* Fix joining rooms over federation where not all servers in the room saw that
  the new server had joined (PR #2094)


Changes in synapse v0.20.0-rc1 (2017-03-30)
===========================================

Features:

* Add delete_devices API (PR #1993)
* Add phone number registration/login support (PR #1994, #2055)

Changes:

* Use JSONSchema for validation of filters. Thanks @pik! (PR #1783)
* Reread log config on SIGHUP (PR #1982)
* Speed up public room list (PR #1989)
* Add helpful texts to logger config options (PR #1990)
* Minor ``/sync`` performance improvements. (PR #2002, #2013, #2022)
* Add some debug to help diagnose weird federation issue (PR #2035)
* Correctly limit retries for all federation requests (PR #2050, #2061)
* Don't lock table when persisting new one time keys (PR #2053)
* Reduce some CPU work on DB threads (PR #2054)
* Cache hosts in room (PR #2060)
* Batch sending of device list pokes (PR #2063)
* Speed up persist event path in certain edge cases (PR #2070)

Bug fixes:

* Fix bug where current_state_events renamed to current_state_ids (PR #1849)
* Fix routing loop when fetching remote media (PR #1992)
* Fix current_state_events table to not lie (PR #1996)
* Fix CAS login to handle PartialDownloadError (PR #1997)
* Fix assertion to stop transaction queue getting wedged (PR #2010)
* Fix presence to fallback to last_active_ts if it beats the last sync time. Thanks @Half-Shot! (PR #2014)
* Fix bug when federation received a PDU while a room join is in progress (PR #2016)
* Fix resetting state on rejected events (PR #2025)
* Fix installation issues in readme. Thanks @ricco386 (PR #2037)
* Fix caching of remote servers' signature keys (PR #2042)
* Fix some leaking log context (PR #2048, #2049, #2057, #2058)
* Fix rejection of invites not reaching sync (PR #2056)


Changes in synapse v0.19.3 (2017-03-20)
=======================================

@@ -30,8 +30,12 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.

-We use Jenkins for continuous integration (http://matrix.org/jenkins), and
-typically all pull requests get automatically tested Jenkins: if your change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org so please lurk there and keep an eye open.
+We use `Jenkins <http://matrix.org/jenkins>`_ and
+`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
+integration. All pull requests to synapse get automatically tested by Travis;
+the Jenkins builds require an administrator to start them. If your change
+breaks the build, this will be shown in github, so please keep an eye on the
+pull request for feedback.

 Code style
 ~~~~~~~~~~
Dockerfile (new file, 19 lines)

@@ -0,0 +1,19 @@
+FROM docker.io/python:2-alpine3.7
+
+RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev libressl-dev libjpeg-turbo-dev linux-headers postgresql-dev libxslt-dev
+
+COPY . /synapse
+
+# A wheel cache may be provided in ./cache for faster build
+RUN cd /synapse \
+ && pip install --upgrade pip setuptools psycopg2 lxml \
+ && mkdir -p /synapse/cache \
+ && pip install -f /synapse/cache --upgrade --process-dependency-links . \
+ && mv /synapse/contrib/docker/start.py /synapse/contrib/docker/conf / \
+ && rm -rf setup.py setup.cfg synapse
+
+VOLUME ["/data"]
+
+EXPOSE 8008/tcp 8448/tcp
+
+ENTRYPOINT ["/start.py"]
@@ -25,6 +25,9 @@ recursive-include synapse/static *.js
 exclude jenkins.sh
 exclude jenkins*.sh
 exclude jenkins*
+exclude Dockerfile
+exclude .dockerignore
 recursive-exclude jenkins *.sh

+prune .github
 prune demo/etc
|||||||
131
README.rst
131
README.rst
@@ -84,6 +84,7 @@ Synapse Installation
|
|||||||
Synapse is the reference python/twisted Matrix homeserver implementation.
|
Synapse is the reference python/twisted Matrix homeserver implementation.
|
||||||
|
|
||||||
System requirements:
|
System requirements:
|
||||||
|
|
||||||
- POSIX-compliant system (tested on Linux & OS X)
|
- POSIX-compliant system (tested on Linux & OS X)
|
||||||
- Python 2.7
|
- Python 2.7
|
||||||
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
|
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
|
||||||
@@ -108,10 +109,10 @@ Installing prerequisites on ArchLinux::
|
|||||||
sudo pacman -S base-devel python2 python-pip \
|
sudo pacman -S base-devel python2 python-pip \
|
||||||
python-setuptools python-virtualenv sqlite3
|
python-setuptools python-virtualenv sqlite3
|
||||||
|
|
||||||
Installing prerequisites on CentOS 7::
|
Installing prerequisites on CentOS 7 or Fedora 25::
|
||||||
|
|
||||||
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
||||||
lcms2-devel libwebp-devel tcl-devel tk-devel \
|
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
|
||||||
python-virtualenv libffi-devel openssl-devel
|
python-virtualenv libffi-devel openssl-devel
|
||||||
sudo yum groupinstall "Development Tools"
|
sudo yum groupinstall "Development Tools"
|
||||||
|
|
||||||
@@ -156,8 +157,9 @@ if you prefer.
|
|||||||
|
|
||||||
In case of problems, please see the _`Troubleshooting` section below.
|
In case of problems, please see the _`Troubleshooting` section below.
|
||||||
|
|
||||||
Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
|
There is an offical synapse image available at https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with the docker-compose file available at `contrib/docker`. Further information on this including configuration options is available in `contrib/docker/README.md`.
|
||||||
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
|
|
||||||
|
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate a synapse server in a single Docker image, at https://hub.docker.com/r/avhost/docker-matrix/tags/
|
||||||
|
|
||||||
Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
|
Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
|
||||||
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
|
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
|
||||||
@@ -199,11 +201,11 @@ different. See `the spec`__ for more information on key management.)
|
|||||||
.. __: `key_management`_
|
.. __: `key_management`_
|
||||||
|
|
||||||
The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
|
The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
|
||||||
configured without TLS; it is not recommended this be exposed outside your
|
configured without TLS; it should be behind a reverse proxy for TLS/SSL
|
||||||
local network. Port 8448 is configured to use TLS with a self-signed
|
termination on port 443 which in turn should be used for clients. Port 8448
|
||||||
certificate. This is fine for testing with but, to avoid your clients
|
is configured to use TLS with a self-signed certificate. If you would like
|
||||||
complaining about the certificate, you will almost certainly want to use
|
to do initial test with a client without having to setup a reverse proxy,
|
||||||
another certificate for production purposes. (Note that a self-signed
|
you can temporarly use another certificate. (Note that a self-signed
|
||||||
certificate is fine for `Federation`_). You can do so by changing
|
certificate is fine for `Federation`_). You can do so by changing
|
||||||
``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
|
``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
|
||||||
in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
|
in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
|
||||||
@@ -245,6 +247,25 @@ Setting up a TURN server
|
|||||||
For reliable VoIP calls to be routed via this homeserver, you MUST configure
|
For reliable VoIP calls to be routed via this homeserver, you MUST configure
|
||||||
a TURN server. See `<docs/turn-howto.rst>`_ for details.
|
a TURN server. See `<docs/turn-howto.rst>`_ for details.
|
||||||
|
|
||||||
|
IPv6
|
||||||
|
----
|
||||||
|
|
||||||
|
As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
|
||||||
|
for providing PR #1696.
|
||||||
|
|
||||||
|
However, for federation to work on hosts with IPv6 DNS servers you **must**
|
||||||
|
be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
|
||||||
|
for details. We can't make Synapse depend on Twisted 17.1 by default
|
||||||
|
yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
|
||||||
|
so if you are using operating system dependencies you'll have to install your
|
||||||
|
own Twisted 17.1 package via pip or backports etc.
|
||||||
|
|
||||||
|
If you're running in a virtualenv then pip should have installed the newest
|
||||||
|
Twisted automatically, but if your virtualenv is old you will need to manually
|
||||||
|
upgrade to a newer Twisted dependency via:
|
||||||
|
|
||||||
|
pip install Twisted>=17.1.0
|
||||||
|
|
||||||
|
|
||||||
Running Synapse
|
Running Synapse
|
||||||
===============
|
===============
|
||||||
@@ -263,10 +284,16 @@ Connecting to Synapse from a client
|
|||||||
The easiest way to try out your new Synapse installation is by connecting to it
|
The easiest way to try out your new Synapse installation is by connecting to it
|
||||||
from a web client. The easiest option is probably the one at
|
from a web client. The easiest option is probably the one at
|
||||||
http://riot.im/app. You will need to specify a "Custom server" when you log on
|
http://riot.im/app. You will need to specify a "Custom server" when you log on
|
||||||
or register: set this to ``https://localhost:8448`` - remember to specify the
|
or register: set this to ``https://domain.tld`` if you setup a reverse proxy
|
||||||
port (``:8448``) unless you changed the configuration. (Leave the identity
|
following the recommended setup, or ``https://localhost:8448`` - remember to specify the
|
||||||
|
port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
|
||||||
server as the default - see `Identity servers`_.)
|
server as the default - see `Identity servers`_.)
|
||||||
|
|
||||||
|
If using port 8448 you will run into errors until you accept the self-signed
|
||||||
|
certificate. You can easily do this by going to ``https://localhost:8448``
|
||||||
|
directly with your browser and accept the presented certificate. You can then
|
||||||
|
go back in your web client and proceed further.
|
||||||
|
|
||||||
If all goes well you should at least be able to log in, create a room, and
|
If all goes well you should at least be able to log in, create a room, and
|
||||||
start sending messages.
|
start sending messages.
|
||||||
|
|
||||||
@@ -328,6 +355,10 @@ https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one
|
|||||||
Fedora
|
Fedora
|
||||||
------
|
------
|
||||||
|
|
||||||
|
Synapse is in the Fedora repositories as ``matrix-synapse``::
|
||||||
|
|
||||||
|
sudo dnf install matrix-synapse
|
||||||
|
|
||||||
Oleg Girko provides Fedora RPMs at
|
Oleg Girko provides Fedora RPMs at
|
||||||
https://obs.infoserver.lv/project/monitor/matrix-synapse
|
https://obs.infoserver.lv/project/monitor/matrix-synapse
|
||||||
|
|
||||||
@@ -335,8 +366,11 @@ ArchLinux
|
|||||||
---------
|
---------
|
||||||
|
|
||||||
The quickest way to get up and running with ArchLinux is probably with the community package
|
The quickest way to get up and running with ArchLinux is probably with the community package
|
||||||
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in all
|
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
|
||||||
the necessary dependencies.
|
the necessary dependencies. If the default web client is to be served (enabled by default in
|
||||||
|
the generated config),
|
||||||
|
https://www.archlinux.org/packages/community/any/python2-matrix-angular-sdk/ will also need to
|
||||||
|
be installed.
|
||||||
|
|
||||||
Alternatively, to install using pip a few changes may be needed as ArchLinux
|
Alternatively, to install using pip a few changes may be needed as ArchLinux
|
||||||
defaults to python 3, but synapse currently assumes python 2.7 by default:
|
defaults to python 3, but synapse currently assumes python 2.7 by default:
|
||||||
@@ -373,7 +407,7 @@ FreeBSD
|
|||||||
|
|
||||||
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
|
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
|
||||||
|
|
||||||
- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
|
- Ports: ``cd /usr/ports/net-im/py-matrix-synapse && make install clean``
|
||||||
- Packages: ``pkg install py27-matrix-synapse``
|
- Packages: ``pkg install py27-matrix-synapse``
|
||||||
|
|
||||||
|
|
||||||
@@ -505,6 +539,30 @@ fix try re-installing from PyPI or directly from
|
|||||||
# Install from github
|
# Install from github
|
||||||
pip install --user https://github.com/pyca/pynacl/tarball/master
|
pip install --user https://github.com/pyca/pynacl/tarball/master
|
||||||
|
|
||||||
|
Running out of File Handles
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If synapse runs out of filehandles, it typically fails badly - live-locking
|
||||||
|
at 100% CPU, and/or failing to accept new TCP connections (blocking the
|
||||||
|
connecting client). Matrix currently can legitimately use a lot of file handles,
|
||||||
|
thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
|
||||||
|
servers. The first time a server talks in a room it will try to connect
|
||||||
|
simultaneously to all participating servers, which could exhaust the available
|
||||||
|
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
|
||||||
|
to respond. (We need to improve the routing algorithm used to be better than
|
||||||
|
full mesh, but as of June 2017 this hasn't happened yet).
|
||||||
|
|
||||||
|
If you hit this failure mode, we recommend increasing the maximum number of
|
||||||
|
open file handles to be at least 4096 (assuming a default of 1024 or 256).
|
||||||
|
This is typically done by editing ``/etc/security/limits.conf``
|
||||||
|
|
||||||
|
Separately, Synapse may leak file handles if inbound HTTP requests get stuck
|
||||||
|
during processing - e.g. blocked behind a lock or talking to a remote server etc.
|
||||||
|
This is best diagnosed by matching up the 'Received request' and 'Processed request'
|
||||||
|
log lines and looking for any 'Processed request' lines which take more than
|
||||||
|
a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
|
||||||
|
you see this failure mode so we can help debug it, however.
|
||||||
|
|
||||||
ArchLinux
|
ArchLinux
|
||||||
~~~~~~~~~
|
~~~~~~~~~
|
||||||
|
|
||||||
@@ -546,8 +604,9 @@ you to run your server on a machine that might not have the same name as your
|
|||||||
domain name. For example, you might want to run your server at
|
domain name. For example, you might want to run your server at
|
||||||
``synapse.example.com``, but have your Matrix user-ids look like
|
``synapse.example.com``, but have your Matrix user-ids look like
|
||||||
``@user:example.com``. (A SRV record also allows you to change the port from
|
``@user:example.com``. (A SRV record also allows you to change the port from
|
||||||
the default 8448. However, if you are thinking of using a reverse-proxy, be
|
the default 8448. However, if you are thinking of using a reverse-proxy on the
|
||||||
sure to read `Reverse-proxying the federation port`_ first.)
|
federation port, which is not recommended, be sure to read
|
||||||
|
`Reverse-proxying the federation port`_ first.)
|
||||||
|
|
||||||
To use a SRV record, first create your SRV record and publish it in DNS. This
|
To use a SRV record, first create your SRV record and publish it in DNS. This
|
||||||
should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
|
should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
|
||||||
@@ -556,6 +615,9 @@ should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
|
|||||||
$ dig -t srv _matrix._tcp.example.com
|
$ dig -t srv _matrix._tcp.example.com
|
||||||
_matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
|
_matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
|
||||||
|
|
||||||
|
Note that the server hostname cannot be an alias (CNAME record): it has to point
|
||||||
|
directly to the server hosting the synapse instance.
|
||||||
|
|
||||||
You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
|
You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
|
||||||
its user-ids, by setting ``server_name``::
|
its user-ids, by setting ``server_name``::
|
||||||
|
|
||||||
@@ -578,6 +640,11 @@ largest boxes pause for thought.)
|
|||||||
|
|
||||||
Troubleshooting
|
Troubleshooting
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
|
You can use the federation tester to check if your homeserver is all set:
|
||||||
|
``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``
|
||||||
|
If any of the attributes under "checks" is false, federation won't work.
|
||||||
|
|
||||||
The typical failure mode with federation is that when you try to join a room,
|
The typical failure mode with federation is that when you try to join a room,
|
||||||
it is rejected with "401: Unauthorized". Generally this means that other
|
it is rejected with "401: Unauthorized". Generally this means that other
|
||||||
servers in the room couldn't access yours. (Joining a room over federation is a
|
servers in the room couldn't access yours. (Joining a room over federation is a
|
||||||
@@ -627,7 +694,7 @@ For information on how to install and use PostgreSQL, please see
|
|||||||
Using a reverse proxy with Synapse
|
Using a reverse proxy with Synapse
|
||||||
==================================
|
==================================
|
||||||
|
|
||||||
It is possible to put a reverse proxy such as
|
It is recommended to put a reverse proxy such as
|
||||||
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
||||||
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
|
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
|
||||||
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||||
@@ -645,9 +712,9 @@ federation port has a number of pitfalls. It is possible, but be sure to read
|
|||||||
`Reverse-proxying the federation port`_.
|
`Reverse-proxying the federation port`_.
|
||||||
|
|
||||||
The recommended setup is therefore to configure your reverse-proxy on port 443
|
The recommended setup is therefore to configure your reverse-proxy on port 443
|
||||||
for client connections, but to also expose port 8448 for server-server
|
to port 8008 of synapse for client connections, but to also directly expose port
|
||||||
connections. All the Matrix endpoints begin ``/_matrix``, so an example nginx
|
8448 for server-server connections. All the Matrix endpoints begin ``/_matrix``,
|
||||||
configuration might look like::
|
so an example nginx configuration might look like::
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 443 ssl;
|
||||||
@@ -769,7 +836,9 @@ spidering 'internal' URLs on your network. At the very least we recommend that
|
|||||||
your loopback and RFC1918 IP addresses are blacklisted.
|
your loopback and RFC1918 IP addresses are blacklisted.
|
||||||
|
|
||||||
This also requires the optional lxml and netaddr python dependencies to be
|
This also requires the optional lxml and netaddr python dependencies to be
|
||||||
installed.
|
installed. This in turn requires the libxml2 library to be available - on
|
||||||
|
Debian/Ubuntu this means ``apt-get install libxml2-dev``, or equivalent for
|
||||||
|
your OS.
|
||||||
|
|
||||||
|
|
||||||
Password reset
|
Password reset
|
||||||
@@ -829,6 +898,17 @@ This should end with a 'PASSED' result::
|
|||||||
|
|
||||||
PASSED (successes=143)
|
PASSED (successes=143)
|
||||||
|
|
||||||
|
Running the Integration Tests
|
||||||
|
=============================
|
||||||
|
|
||||||
|
Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
|
||||||
|
a Matrix homeserver integration testing suite, which uses HTTP requests to
|
||||||
|
access the API as a Matrix client would. It is able to run Synapse directly from
|
||||||
|
the source tree, so installation of the server is not required.
|
||||||
|
|
||||||
|
Testing with SyTest is recommended for verifying that changes related to the
|
||||||
|
Client-Server API are functioning correctly. See the `installation instructions
|
||||||
|
<https://github.com/matrix-org/sytest#installing>`_ for details.
|
||||||
|
|
||||||
Building Internal API Documentation
|
Building Internal API Documentation
|
||||||
===================================
|
===================================
|
||||||
@@ -852,12 +932,9 @@ cache a lot of recent room data and metadata in RAM in order to speed up
 common requests. We'll improve this in future, but for now the easiest
 way to either reduce the RAM usage (at the risk of slowing things down)
 is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
-variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out
-at around 3-4GB of resident memory - this is what we currently run the
-matrix.org on. The default setting is currently 0.1, which is probably
-around a ~700MB footprint. You can dial it down further to 0.02 if
-desired, which targets roughly ~512MB. Conversely you can dial it up if
-you need performance for lots of users and have a box with a lot of RAM.
+variable. The default is 0.5, which can be decreased to reduce RAM usage
+in memory constrained environments, or increased if performance starts to
+degrade.


 .. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
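As a rough illustration of what the factor does (a simplified sketch, not Synapse's actual cache code), each cache's base capacity is scaled by the factor read from the environment:

.. code:: python

    import os

    # Simplified sketch: a global factor read from the environment scales
    # each cache's base capacity up or down.
    CACHE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", "0.5"))

    def scaled_cache_size(base_size):
        return max(1, int(base_size * CACHE_FACTOR))

    print(scaled_cache_size(10000))  # 5000 with the default factor of 0.5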

UPGRADE.rst (66 lines changed)

@@ -5,30 +5,60 @@ Before upgrading check if any special steps are required to upgrade from the
 what you currently have installed to current version of synapse. The extra
 instructions that may be required are listed later in this document.

-If synapse was installed in a virtualenv then active that virtualenv before
-upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:
+1. If synapse was installed in a virtualenv then activate that virtualenv before
+   upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
+   run:
+
+   .. code:: bash
+
+       source ~/.synapse/bin/activate
+
+2. If synapse was installed using pip then upgrade to the latest version by
+   running:
+
+   .. code:: bash
+
+       pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
+
+       # restart synapse
+       synctl restart
+
+
+   If synapse was installed using git then upgrade to the latest version by
+   running:
+
+   .. code:: bash
+
+       # Pull the latest version of the master branch.
+       git pull
+       # Update the versions of synapse's python dependencies.
+       python synapse/python_dependencies.py | xargs pip install --upgrade
+
+       # restart synapse
+       ./synctl restart
+
+
+To check whether your update was successful, you can check the Server header
+returned by the Client-Server API:

 .. code:: bash

-    source ~/.synapse/bin/activate
+    # replace <host.name> with the hostname of your synapse homeserver.
+    # You may need to specify a port (eg, :8448) if your server is not
+    # configured on port 443.
+    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

-If synapse was installed using pip then upgrade to the latest version by
-running:
+Upgrading to $NEXT_VERSION
+====================

-.. code:: bash
+This release expands the anonymous usage stats sent if the opt-in
+``report_stats`` configuration is set to ``true``. We now capture RSS memory
+and cpu use at a very coarse level. This requires administrators to install
+the optional ``psutil`` python module.

-    pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
-
-If synapse was installed using git then upgrade to the latest version by
-running:
-
-.. code:: bash
-
-    # Pull the latest version of the master branch.
-    git pull
-    # Update the versions of synapse's python dependencies.
-    python synapse/python_dependencies.py | xargs -n1 pip install --upgrade
+We would appreciate it if you could assist by ensuring this module is available
+and ``report_stats`` is enabled. This will let us see if performance changes to
+synapse are having an impact on the general community.

 Upgrading to v0.15.0
 ====================

contrib/README.rst (new file, 10 lines)

Community Contributions
=======================

Everything in this directory is a project submitted by the community that may be useful
to others. As such, the project maintainers cannot guarantee support, stability
or backwards compatibility of these projects.

Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.

@@ -36,15 +36,13 @@ class HttpClient(object):
             the request body. This will be encoded as JSON.

         Returns:
-            Deferred: Succeeds when we get *any* HTTP response.
-
-            The result of the deferred is a tuple of `(code, response)`,
-            where `response` is a dict representing the decoded JSON body.
+            Deferred: Succeeds when we get a 2xx HTTP response. The result
+            will be the decoded JSON body.
         """
         pass

     def get_json(self, url, args=None):
-        """ Get's some json from the given host homeserver and path
+        """ Gets some json from the given host homeserver and path

         Args:
             url (str): The URL to GET data from.
@@ -54,10 +52,8 @@ class HttpClient(object):
            and *not* a string.

         Returns:
-            Deferred: Succeeds when we get *any* HTTP response.
-
-            The result of the deferred is a tuple of `(code, response)`,
-            where `response` is a dict representing the decoded JSON body.
+            Deferred: Succeeds when we get a 2xx HTTP response. The result
+            will be the decoded JSON body.
         """
         pass
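The revised contract (a Deferred that fires with the decoded JSON body only on a 2xx response) could be sketched in Twisted like so. This assumes the third-party `treq` HTTP client and is purely illustrative, not the client synapse actually ships:

```python
import treq
from twisted.internet import defer

@defer.inlineCallbacks
def get_json(url, args=None):
    # Fires with the decoded JSON body on a 2xx response, errors otherwise.
    response = yield treq.get(url, params=args or {})
    if not 200 <= response.code < 300:
        raise RuntimeError("request failed with HTTP %d" % response.code)
    body = yield treq.json_content(response)
    defer.returnValue(body)
```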

contrib/docker/README.md (new file, 153 lines)

# Synapse Docker

The `matrixdotorg/synapse` Docker image will run Synapse as a single process. It does not provide a
database server or a TURN server; you should run these separately.

If you run a Postgres server, you should simply include it in the same Compose
project or set the proper environment variables and the image will automatically
use that server.

## Build

Build the docker image with the `docker build` command from the root of the synapse repository.

```
docker build -t docker.io/matrixdotorg/synapse .
```

The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

You may have a local Python wheel cache available, in which case copy the relevant packages in the ``cache/`` directory at the root of the project.

## Run

This image is designed to run either with an automatically generated configuration
file or with a custom configuration that requires manual editing.

### Automated configuration

It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.

Read the section about environment variables and set at least the mandatory variables,
then run the server:

```
docker-compose up -d
```

If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.

### Manual configuration

A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment sections that are not suitable for your usecase.

Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:

```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```

Then, customize your configuration and run the server:

```
docker-compose up -d
```

### Without Compose

If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.

```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```

## Volumes

The image expects a single volume, located at ``/data``, that will hold:

* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.

You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.

In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service Yaml
configuration file there. Multiple application services are supported.

## Environment

Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.

Global settings:

* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file

If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.

Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:

* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS`` (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
  you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guests to join this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
  uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.

Shared secrets, that will be initialized to random values if not set:

* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
  registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY``, secret for signing access tokens
  to the server.

Database specific values (will use SQLite if not set):

* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]

Mail server specific values (will not send emails if not set):

* ``SYNAPSE_SMTP_HOST``, hostname to the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server if any.

contrib/docker/conf/homeserver.yaml (new file, 219 lines)

# vim:ft=yaml

## TLS ##

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

## Server ##

server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0

## Ports ##

listeners:
  {% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]  # Federation APIs
        compress: false
  {% endif %}

  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false

    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false

## Database ##

{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}

## Performance ##

event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"

## Ratelimiting ##

rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3

## Files ##

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "10M"
max_image_pixels: "32M"
dynamic_thumbnails: false

# List of thumbnails to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
  height: 32
  method: crop
- width: 96
  height: 96
  method: crop
- width: 320
  height: 240
  method: scale
- width: 640
  height: 480
  method: scale
- width: 800
  height: 600
  method: scale

url_preview_enabled: False
max_spider_size: "10M"

## Captcha ##

{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}

## Turn ##

{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %} - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}

## Registration ##

enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
    - matrix.org
    - vector.im
    - riot.im

## Metrics ###

{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}

## API Configuration ##

room_invite_state_types:
    - "m.room.join_rules"
    - "m.room.canonical_alias"
    - "m.room.avatar"
    - "m.room.name"

{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}

macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False

## Signing Keys ##

signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d" # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

password_config:
  enabled: true

{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}
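The file above is a Jinja2 template: the `{{ ... }}` and `{% ... %}` markers are substituted from the container's environment at startup. A minimal sketch of that rendering step, equivalent to the `convert` helper in `start.py` further down:

```python
import os

import jinja2

def convert(src, dst, environ):
    # Render a Jinja2 template file using environment variables as context.
    with open(src) as f:
        template = jinja2.Template(f.read())
    with open(dst, "w") as f:
        f.write(template.render(**environ))

convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", os.environ.copy())
```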

contrib/docker/conf/log.config (new file, 29 lines)

version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
    synapse:
        level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

    synapse.storage.SQL:
        # beware: increasing this to DEBUG will make synapse log sensitive
        # information such as access tokens.
        level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
    handlers: [console]
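Once rendered, this is a standard `logging` dictConfig document. A sketch of loading it, assuming the template markers have already been substituted and that synapse is importable (the config instantiates `synapse.util.logcontext.LoggingContextFilter`):

```python
import logging.config

import yaml  # PyYAML

with open("/compiled/log.config") as f:
    logging.config.dictConfig(yaml.safe_load(f))

logging.getLogger("synapse").warning("logging is configured")
```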

contrib/docker/docker-compose.yml (new file, 49 lines)

# This compose file is compatible with Compose itself, it might need some
# adjustments to run properly with stack.

version: '3'

services:

  synapse:
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    environment:
      - SYNAPSE_SERVER_NAME=my.matrix.host
      - SYNAPSE_REPORT_STATS=no
      - SYNAPSE_ENABLE_REGISTRATION=yes
      - SYNAPSE_LOG_LEVEL=INFO
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following, you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy, here is an example for traefik:
    labels:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.host
      - traefik.port=8448

  db:
    image: docker.io/postgres:10-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data

contrib/docker/start.py (new executable file, 66 lines)

#!/usr/local/bin/python

import jinja2
import os
import sys
import subprocess
import glob

# Utility functions
convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))

def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)

def generate_secrets(environ, secrets):
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle: value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = os.urandom(32).encode("hex")
                with open(filename, "w") as handle: handle.write(value)
            environ[secret] = value

# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration, missing keys, then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Parse the configuration file
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"): os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]

    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)

@@ -22,6 +22,8 @@ import argparse
 from synapse.events import FrozenEvent
 from synapse.util.frozenutils import unfreeze

+from six import string_types
+

 def make_graph(file_name, room_id, file_prefix, limit):
     print "Reading lines"
@@ -58,7 +60,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
     for key, value in unfreeze(event.get_dict()["content"]).items():
         if value is None:
             value = "<null>"
-        elif isinstance(value, basestring):
+        elif isinstance(value, string_types):
             pass
         else:
             value = json.dumps(value)
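`six.string_types` is `(str, unicode)` on Python 2 and `(str,)` on Python 3, so the replacement keeps the check working identically under either interpreter. A quick illustration:

```python
from six import string_types

# Behaves the same under Python 2 and 3.
assert isinstance("moo", string_types)
assert not isinstance(42, string_types)
```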

contrib/prometheus/README (new file, 37 lines)

This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.

To use it, first install prometheus by following the instructions at

    http://prometheus.io/

### for Prometheus v1
Add a new job to the main prometheus.conf file:

  job: {
    name: "synapse"

    target_group: {
      target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
    }
  }

### for Prometheus v2
Add a new job to the main prometheus.yml file:

  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"
    # when endpoint uses https:
    scheme: "https"

    static_configs:
      - targets: ['SERVER.LOCATION:PORT']

To use `synapse.rules` add

  rule_files:
    - "/PATH/TO/synapse-v2.rules"

Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option.

contrib/prometheus/consoles/synapse.html (new file, 395 lines)

{{ template "head" . }}

{{ template "prom_content_head" . }}
<h1>System Resources</h1>

<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_utime"),
  expr: "rate(process_cpu_seconds_total[2m]) * 100",
  name: "[[job]]",
  min: 0,
  max: 100,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "%",
  yTitle: "CPU Usage"
})
</script>

<h3>Memory</h3>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_maxrss"),
  expr: "process_psutil_rss:max",
  name: "Maxrss",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "bytes",
  yTitle: "Usage"
})
</script>

<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_fds"),
  expr: "process_open_fds{job='synapse'}",
  name: "FDs",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "",
  yTitle: "Descriptors"
})
</script>

<h1>Reactor</h1>

<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_total_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
  name: "time",
  max: 1,
  min: 0,
  renderer: "area",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_average_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
  name: "time",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s",
  yTitle: "Time"
})
</script>

<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_pending_calls"),
  expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
  name: "calls",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yTitle: "Pending Calls"
})
</script>

<h1>Storage</h1>

<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_query_time"),
  expr: "rate(synapse_storage_query_time:count[2m])",
  name: "[[verb]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "queries/s",
  yTitle: "Queries"
})
</script>

<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transaction_time"),
  expr: "rate(synapse_storage_transaction_time:count[2m])",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "txn/s",
  yTitle: "Transactions"
})
</script>

<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transactions_time_msec"),
  expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_schedule_time"),
  expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
  name: "Total latency",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_ratio"),
  expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
  name: "[[name]]",
  min: 0,
  max: 100,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "%",
  yTitle: "Percentage"
})
</script>

<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_size"),
  expr: "synapse_util_caches_cache:size",
  name: "[[name]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Items"
})
</script>

<h1>Requests</h1>

<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet"),
  expr: "rate(synapse_http_server_request_count:servlet[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
  expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_time_avg"),
  expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses"),
  expr: "rate(synapse_http_server_responses[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses_err"),
  expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>


<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_ru_utime"),
  expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "CPU Usage"
})
</script>


<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
  expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "DB Usage"
})
</script>


<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_send_time_avg"),
  expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h1>Federation</h1>

<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_client_sent"),
  expr: "rate(synapse_federation_client_sent[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_server_received"),
  expr: "rate(synapse_federation_server_received[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_transaction_queue_pending"),
  expr: "synapse_federation_transaction_queue_pending",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Units"
})
</script>

<h1>Clients</h1>

<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_listeners"),
  expr: "synapse_notifier_listeners",
  name: "listeners",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Listeners"
})
</script>

<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_notified_events"),
  expr: "rate(synapse_notifier_notified_events[2m])",
  name: "events",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "events/s",
  yTitle: "Event rate"
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}

contrib/prometheus/synapse-v1.rules (new file, 21 lines)

synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0

contrib/prometheus/synapse-v2.rules (new file, 60 lines)

groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_request_count:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_request_count) by (method)"
  - record: 'synapse_http_server_request_count:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_request_count) by (servlet)'

  - record: 'synapse_http_server_request_count:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_client_sent_queries) by (job)'

  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_server_received_queries) by (job)'

  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "EDU"
    expr: 'synapse_federation_transaction_queue_pending_edus + 0'
  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
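Unlike the v1 format, the v2 rules are plain YAML, so they can be sanity-checked mechanically; a small sketch with PyYAML (the path is this contrib file's location in the repository):

```python
import yaml  # PyYAML

with open("contrib/prometheus/synapse-v2.rules") as f:
    doc = yaml.safe_load(f)

# Every recording rule must name a metric and provide an expression.
for group in doc["groups"]:
    for rule in group["rules"]:
        assert "record" in rule and "expr" in rule
        print(rule["record"], rule.get("labels", {}))
```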

@@ -2,6 +2,9 @@
 # (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
 # rather than in a user home directory or similar under virtualenv.

+# **NOTE:** This is an example service file that may change in the future. If you
+# wish to use this please copy rather than symlink it.
+
 [Unit]
 Description=Synapse Matrix homeserver

@@ -9,9 +12,11 @@ Description=Synapse Matrix homeserver
 Type=simple
 User=synapse
 Group=synapse
-EnvironmentFile=-/etc/sysconfig/synapse
 WorkingDirectory=/var/lib/synapse
-ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
+ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
+ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
+# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR
+
 [Install]
 WantedBy=multi-user.target

docs/admin_api/media_admin_api.md (new file, 23 lines)

# List all media in a room

This API gets a list of known media in a room.

The API is:
```
GET /_matrix/client/r0/admin/room/<room_id>/media
```
including an `access_token` of a server admin.

It returns a JSON body like the following:
```
{
    "local": [
        "mxc://localhost/xwvutsrqponmlkjihgfedcba",
        "mxc://localhost/abcdefghijklmnopqrstuvwx"
    ],
    "remote": [
        "mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
        "mxc://matrix.org/abcdefghijklmnopqrstuvwx"
    ]
}
```
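A minimal way to drive this endpoint from Python with the `requests` library; the homeserver URL, room id, and token below are all placeholders:

```python
from urllib.parse import quote

import requests

BASE = "https://matrix.example.com"  # placeholder homeserver

resp = requests.get(
    BASE + "/_matrix/client/r0/admin/room/"
    + quote("!room:matrix.example.com", safe="") + "/media",
    params={"access_token": "<admin access token>"},
)
resp.raise_for_status()
media = resp.json()
print(len(media["local"]), "local,", len(media["remote"]), "remote")
```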

@@ -8,8 +8,56 @@ Depending on the amount of history being purged a call to the API may take
 several minutes or longer. During this period users will not be able to
 paginate further back in the room from the point being purged from.

-The API is simply:
+The API is:

-``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
+``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``

 including an ``access_token`` of a server admin.

+By default, events sent by local users are not deleted, as they may represent
+the only copies of this content in existence. (Events sent by remote users are
+deleted.)
+
+Room state data (such as joins, leaves, topic) is always preserved.
+
+To delete local message events as well, set ``delete_local_events`` in the body:
+
+.. code:: json
+
+   {
+       "delete_local_events": true
+   }
+
+The caller must specify the point in the room to purge up to. This can be
+specified by including an event_id in the URI, or by setting a
+``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
+id is given, that event (and others at the same graph depth) will be retained.
+If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
+in milliseconds.
+
+The API starts the purge running, and returns immediately with a JSON body with
+a purge id:
+
+.. code:: json
+
+    {
+        "purge_id": "<opaque id>"
+    }
+
+Purge status query
+------------------
+
+It is possible to poll for updates on recent purges with a second API;
+
+``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``
+
+(again, with a suitable ``access_token``). This API returns a JSON body like
+the following:
+
+.. code:: json
+
+    {
+        "status": "active"
+    }
+
+The status will be one of ``active``, ``complete``, or ``failed``.
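Putting the two endpoints together, a minimal admin-side sketch with the ``requests`` library; the homeserver URL, room id, timestamp, and token are all placeholders:

.. code:: python

    import time
    from urllib.parse import quote

    import requests

    BASE = "https://matrix.example.com"              # placeholder homeserver
    AUTH = {"access_token": "<admin access token>"}  # placeholder token

    # Start a purge up to a unix-epoch timestamp in milliseconds.
    resp = requests.post(
        BASE + "/_matrix/client/r0/admin/purge_history/"
        + quote("!room:matrix.example.com", safe=""),
        params=AUTH,
        json={"purge_up_to_ts": 1500000000000},
    )
    resp.raise_for_status()
    purge_id = resp.json()["purge_id"]

    # Poll the status endpoint until the purge leaves the 'active' state.
    while True:
        status = requests.get(
            BASE + "/_matrix/client/r0/admin/purge_history_status/" + purge_id,
            params=AUTH,
        ).json()["status"]
        if status != "active":
            break
        time.sleep(5)

    print("purge finished with status:", status)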
|||||||
73
docs/admin_api/user_admin_api.rst
Normal file
73
docs/admin_api/user_admin_api.rst
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
Query Account
|
||||||
|
=============
|
||||||
|
|
||||||
|
This API returns information about a specific user account.
|
||||||
|
|
||||||
|
The api is::
|
||||||
|
|
||||||
|
GET /_matrix/client/r0/admin/whois/<user_id>
|
||||||
|
|
||||||
|
including an ``access_token`` of a server admin.
|
||||||
|
|
||||||
|
It returns a JSON body like the following:
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
{
|
||||||
|
"user_id": "<user_id>",
|
||||||
|
"devices": {
|
||||||
|
"": {
|
||||||
|
"sessions": [
|
||||||
|
{
|
||||||
|
"connections": [
|
||||||
|
{
|
||||||
|
"ip": "1.2.3.4",
|
||||||
|
"last_seen": 1417222374433,
|
||||||
|
"user_agent": "Mozilla/5.0 ..."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ip": "1.2.3.10",
|
||||||
|
"last_seen": 1417222374500,
|
||||||
|
"user_agent": "Dalvik/2.1.0 ..."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
``last_seen`` is measured in milliseconds since the Unix epoch.
|
||||||

Deactivate Account
==================

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).

The API is::

    POST /_matrix/client/r0/admin/deactivate/<user_id>

including an ``access_token`` of a server admin, and an empty request body.


Reset password
==============

Changes the password of another user.

The API is::

    POST /_matrix/client/r0/admin/reset_password/<user_id>

with a body of:

.. code:: json

   {
       "new_password": "<secret>"
   }

including an ``access_token`` of a server admin.
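
As an illustrative sketch, again with the third-party ``requests`` library
(placeholders as before):

.. code:: python

    import requests

    requests.post(
        "https://server/_matrix/client/r0/admin/reset_password/@user:server",
        params={"access_token": "<admin_token>"},
        json={"new_password": "<secret>"},
    ).raise_for_status()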
@@ -1,52 +1,119 @@

- Everything should comply with PEP8. Code should pass
  ``pep8 --max-line-length=100`` without any warnings.

- **Indenting**:

  - NEVER tabs. 4 spaces to indent.

  - follow PEP8; either hanging indent or multiline-visual indent depending
    on the size and shape of the arguments and what makes more sense to the
    author. In other words, both this::

        print("I am a fish %s" % "moo")

    and this::

        print("I am a fish %s" %
              "moo")

    and this::

        print(
            "I am a fish %s" %
            "moo",
        )

    ...are valid, although given each one takes up 2x more vertical space than
    the previous, it's up to the author's discretion as to which layout makes
    most sense for their function invocation. (e.g. if they want to add
    comments per-argument, or put expressions in the arguments, or group
    related arguments together, or want to deliberately extend or preserve
    vertical/horizontal space)

- **Line length**:

  Max line length is 79 chars (with flexibility to overflow by a "few chars" if
  the overflowing content is not semantically significant and avoids an
  explosion of vertical whitespace).

  Use parentheses instead of ``\`` for line continuation wherever possible
  (which is pretty much everywhere).

- **Naming**:

  - Use camel case for class and type names
  - Use underscores for functions and variables.

- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.

- **Blank lines**:

  - There should be max a single new line between:

    - statements
    - functions in a class

  - There should be two new lines between:

    - definitions in a module (e.g., between different classes)

- **Whitespace**:

  There should be spaces where spaces should be and not where there shouldn't
  be:

  - a single space after a comma
  - a single space before and after for '=' when used as assignment
  - no spaces before and after for '=' for default values and keyword arguments.

- **Comments**: should follow the `google code style
  <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
  This is so that we can generate documentation with `sphinx
  <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
  `examples
  <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
  in the sphinx documentation.

- **Imports**:

  - Prefer to import classes and functions than packages or modules.

    Example::

        from synapse.types import UserID
        ...
        user_id = UserID(local, server)

    is preferred over::

        from synapse import types
        ...
        user_id = types.UserID(local, server)

    (or any other variant).

    This goes against the advice in the Google style guide, but it means that
    errors in the name are caught early (at import time).

  - Multiple imports from the same package can be combined onto one line::

        from synapse.types import GroupID, RoomID, UserID

    An effort should be made to keep the individual imports in alphabetical
    order.

    If the list becomes long, wrap it with parentheses and split it over
    multiple lines.

  - As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
    imports should be grouped in the following order, with a blank line between
    each group:

    1. standard library imports
    2. related third party imports
    3. local application/library specific imports

  - Imports within each group should be sorted alphabetically by module name.

  - Avoid wildcard imports (``from synapse.types import *``) and relative
    imports (``from .types import UserID``).
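
Putting several of these rules together, a short and purely illustrative
snippet which follows the conventions above (double quotes, camel-cased class
name, underscored variables, PEP8 whitespace) might look like::

    from synapse.types import UserID


    class RoomMemberCounter(object):
        """Counts members of a room (Google-style docstring for sphinx)."""

        def __init__(self, room_id, initial_count=0):
            # a single space around '=' for assignment; no spaces around the
            # '=' in the keyword default above
            self.room_id = room_id
            self.count = initial_count

        def add_member(self, localpart, server):
            user_id = UserID(localpart, server)
            self.count += 1
            return user_id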

docs/consent_tracking.md (new file)
@@ -0,0 +1,160 @@

Support in Synapse for tracking agreement to server terms and conditions
========================================================================

Synapse 0.30 introduces support for tracking whether users have agreed to the
terms and conditions set by the administrator of a server - and blocking access
to the server until they have.

There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.

Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.

Collecting policy agreement from a user
---------------------------------------

Synapse can be configured to serve the user a simple policy form with an
"accept" button. Clicking "Accept" records the user's acceptance in the
database and shows a success page.

To enable this, first create templates for the policy and success pages.
These should be stored on the local filesystem.

These templates use the [Jinja2](http://jinja.pocoo.org) templating language,
and [docs/privacy_policy_templates](privacy_policy_templates) gives
examples of the sort of thing that can be done.

Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.

The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.

Once the templates are in place, make the following changes to `homeserver.yaml`:

1. Add a `user_consent` section, which should look like:

   ```yaml
   user_consent:
     template_dir: privacy_policy_templates
     version: 1.0
   ```

   `template_dir` points to the directory containing the policy
   templates. `version` defines the version of the policy which will be served
   to the user. In the example above, Synapse will serve
   `privacy_policy_templates/en/1.0.html`.

2. Add a `form_secret` setting at the top level:

   ```yaml
   form_secret: "<unique secret>"
   ```

   This should be set to an arbitrary secret string (try `pwgen -y 30` to
   generate suitable secrets).

   More on what this is used for below.

3. Add `consent` wherever the `client` resource is currently enabled in the
   `listeners` configuration. For example:

   ```yaml
   listeners:
     - port: 8008
       resources:
         - names:
           - client
           - consent
   ```

Finally, ensure that `jinja2` is installed. If you are using a virtualenv, this
should be a matter of `pip install Jinja2`. On debian, try `apt-get install
python-jinja2`.

Once this is complete, and the server has been restarted, try visiting
`https://<server>/_matrix/consent`. If correctly configured, this should give
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.

### Constructing the consent URI

It may be useful to manually construct the "consent URI" for a given user - for
instance, in order to send them an email asking them to consent. To do this,
take the base `https://<server>/_matrix/consent` URL and add the following
query parameters:

* `u`: the user id of the user. This can either be a full MXID
  (`@user:server.com`) or just the localpart (`user`).

* `h`: hex-encoded HMAC-SHA256 of `u` using the `form_secret` as a key. It is
  possible to calculate this on the commandline with something like:

  ```bash
  echo -n '<user>' | openssl sha256 -hmac '<form_secret>'
  ```

This should result in a URI which looks something like:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
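
Equivalently, the HMAC can be computed with a short Python sketch using only
the standard library (`<user>` and `<form_secret>` are the same placeholders
as above):

```python
import hmac
from hashlib import sha256

form_secret = "<form_secret>"
user = "<user>"

# hex-encoded HMAC-SHA256 of the user id, keyed with form_secret
h = hmac.new(form_secret.encode("utf8"), user.encode("utf8"), sha256).hexdigest()

print("https://<server>/_matrix/consent?u=%s&h=%s" % (user, h))
```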

Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------

It is possible to configure Synapse to send a [server
notice](server_notices.md) to anybody who has not yet agreed to the current
version of the policy. To do so:

* ensure that the consent resource is configured, as in the previous section

* ensure that server notices are configured, as in [server_notices.md](server_notices.md).

* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
  example:

  ```yaml
  user_consent:
    server_notice_content:
      msgtype: m.text
      body: >-
        Please give your consent to the privacy policy at %(consent_uri)s.
  ```

  Synapse automatically replaces the placeholder `%(consent_uri)s` with the
  consent uri for that user.

* ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
  URI that clients use to connect to the server. (It is used to construct
  `consent_uri` in the server notice.)


Blocking users from using the server until they agree to the policy
--------------------------------------------------------------------

Synapse can be configured to block any attempts to join rooms or send messages
until the user has given their agreement to the policy. (Joining the server
notices room is exempted from this.)

To enable this, add `block_events_error` under `user_consent`. For example:

```yaml
user_consent:
  block_events_error: >-
    You can't send any messages until you consent to the privacy policy at
    %(consent_uri)s.
```

Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user.

Ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the error.)

@@ -279,9 +279,9 @@ Obviously that option means that the operations done in

that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).

The second option is to use ``logcontext.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function — the opposite operation to that described in the previous section.

@@ -293,15 +293,11 @@ It can be used like this:

    def do_request_handling():
        yield foreground_operation()

        logcontext.run_in_background(background_operation)

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Passing synapse deferreds into third-party functions
----------------------------------------------------

docs/manhole.md (new file)
@@ -0,0 +1,43 @@

Using the synapse manhole
=========================

The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.

To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:

```yaml
listeners:
  - port: 9000
    bind_addresses: ['::1', '127.0.0.1']
    type: manhole
```

(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).

Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.

Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:

```bash
ssh -p9000 matrix@localhost
```

The password is `rabbithole`.

This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.

As a simple example, retrieving an event from the database:

```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```

@@ -21,19 +21,65 @@ How to monitor Synapse metrics using Prometheus

3. Add a prometheus target for synapse.

   It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::

    - job_name: "synapse"
      metrics_path: "/_synapse/metrics"
      static_configs:
        - targets: ["my.server.here:9092"]

   If your prometheus is older than 1.5.2, you will need to replace
   ``static_configs`` in the above with ``target_groups``.

   Restart prometheus.


Block and response metrics renamed for 0.27.0
---------------------------------------------

Synapse 0.27.0 begins the process of rationalising the duplicate ``*:count``
metrics reported for the resource tracking for code blocks and HTTP requests.

At the same time, the corresponding ``*:total`` metrics are being renamed, as
the ``:total`` suffix no longer makes sense in the absence of a corresponding
``:count`` metric.

To enable a graceful migration path, this release just adds new names for the
metrics being renamed. A future release will remove the old ones.

The following table shows the new metrics, and the old metrics which they are
replacing.

==================================================== ===================================================
New name                                             Old name
==================================================== ===================================================
synapse_util_metrics_block_count                     synapse_util_metrics_block_timer:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_utime:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_stime:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_count:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_duration:count

synapse_util_metrics_block_time_seconds              synapse_util_metrics_block_timer:total
synapse_util_metrics_block_ru_utime_seconds          synapse_util_metrics_block_ru_utime:total
synapse_util_metrics_block_ru_stime_seconds          synapse_util_metrics_block_ru_stime:total
synapse_util_metrics_block_db_txn_count              synapse_util_metrics_block_db_txn_count:total
synapse_util_metrics_block_db_txn_duration_seconds   synapse_util_metrics_block_db_txn_duration:total

synapse_http_server_response_count                   synapse_http_server_requests
synapse_http_server_response_count                   synapse_http_server_response_time:count
synapse_http_server_response_count                   synapse_http_server_response_ru_utime:count
synapse_http_server_response_count                   synapse_http_server_response_ru_stime:count
synapse_http_server_response_count                   synapse_http_server_response_db_txn_count:count
synapse_http_server_response_count                   synapse_http_server_response_db_txn_duration:count

synapse_http_server_response_time_seconds            synapse_http_server_response_time:total
synapse_http_server_response_ru_utime_seconds        synapse_http_server_response_ru_utime:total
synapse_http_server_response_ru_stime_seconds        synapse_http_server_response_ru_stime:total
synapse_http_server_response_db_txn_count            synapse_http_server_response_db_txn_count:total
synapse_http_server_response_db_txn_duration_seconds synapse_http_server_response_db_txn_duration:total
==================================================== ===================================================


Standard Metric Names
---------------------

@@ -43,7 +89,7 @@ have been changed to seconds, from milliseconds.

================================== =============================
New name                           Old name
================================== =============================
process_cpu_user_seconds_total     process_resource_utime / 1000
process_cpu_system_seconds_total   process_resource_stime / 1000
process_open_fds (no 'type' label) process_fds

@@ -53,7 +99,7 @@ The python-specific counts of garbage collector performance have been renamed.

=========================== ======================
New name                    Old name
=========================== ======================
python_gc_time              reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts            reactor_gc_counts

@@ -63,7 +109,7 @@ The twisted-specific reactor metrics have been renamed.

==================================== =====================
New name                             Old name
==================================== =====================
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time     reactor_tick_time
==================================== =====================

docs/password_auth_providers.rst (new file)
@@ -0,0 +1,99 @@

Password auth provider modules
==============================

Password auth providers offer a way for server administrators to integrate
their Synapse installation with an existing authentication system.

A password auth provider is a Python class which is dynamically loaded into
Synapse, and provides a number of methods by which it can integrate with the
authentication system.

This document serves as a reference for those looking to implement their own
password auth providers.

Required methods
----------------

Password auth provider classes must provide the following methods:

*class* ``SomeProvider.parse_config``\(*config*)

   This method is passed the ``config`` object for this module from the
   homeserver configuration file.

   It should perform any appropriate sanity checks on the provided
   configuration, and return an object which is then passed into ``__init__``.

*class* ``SomeProvider``\(*config*, *account_handler*)

   The constructor is passed the config object returned by ``parse_config``,
   and a ``synapse.module_api.ModuleApi`` object which allows the
   password provider to check if accounts exist and/or create new ones.

Optional methods
----------------

Password auth provider classes may optionally provide the following methods.

*class* ``SomeProvider.get_db_schema_files``\()

   This method, if implemented, should return an Iterable of ``(name,
   stream)`` pairs of database schema files. Each file is applied in turn at
   initialisation, and a record is then made in the database so that it is
   not re-applied on the next start.

``someprovider.get_supported_login_types``\()

   This method, if implemented, should return a ``dict`` mapping from a login
   type identifier (such as ``m.login.password``) to an iterable giving the
   fields which must be provided by the user in the submission to the
   ``/login`` api. These fields are passed in the ``login_dict`` dictionary
   to ``check_auth``.

   For example, if a password auth provider wants to implement a custom login
   type of ``com.example.custom_login``, where the client is expected to pass
   the fields ``secret1`` and ``secret2``, the provider should implement this
   method and return the following dict::

      {"com.example.custom_login": ("secret1", "secret2")}

``someprovider.check_auth``\(*username*, *login_type*, *login_dict*)

   This method is the one that does the real work. If implemented, it will be
   called for each login attempt where the login type matches one of the keys
   returned by ``get_supported_login_types``.

   It is passed the (possibly UNqualified) ``user`` provided by the client,
   the login type, and a dictionary of login secrets passed by the client.

   The method should return a Twisted ``Deferred`` object, which resolves to
   the canonical ``@localpart:domain`` user id if authentication is successful,
   and ``None`` if not.

   Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in
   which case the second field is a callback which will be called with the
   result from the ``/login`` call (including ``access_token``, ``device_id``,
   etc.)

``someprovider.check_password``\(*user_id*, *password*)

   This method provides a simpler interface than ``get_supported_login_types``
   and ``check_auth`` for password auth providers that just want to provide a
   mechanism for validating ``m.login.password`` logins.

   If implemented, it will be called to check logins with an
   ``m.login.password`` login type. It is passed a qualified
   ``@localpart:domain`` user id, and the password provided by the user.

   The method should return a Twisted ``Deferred`` object, which resolves to
   ``True`` if authentication is successful, and ``False`` if not.

``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*)

   This method, if implemented, is called when a user logs out. It is passed
   the qualified user ID, the ID of the deactivated device (if any: access
   tokens are occasionally created without an associated device ID), and the
   (now deactivated) access token.

   It may return a Twisted ``Deferred`` object; the logout request will wait
   for the deferred to complete but the result is ignored.
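
By way of illustration, a minimal provider which validates ``m.login.password``
logins against a static user-to-password map might look like the following.
This is only a sketch against the interface described above; the ``users``
configuration key is invented for the example, and a real provider would call
out to an external authentication system instead::

    from twisted.internet import defer


    class DictPasswordProvider(object):
        """Checks m.login.password logins against a static map of users."""

        def __init__(self, config, account_handler):
            # account_handler is a synapse.module_api.ModuleApi
            self.account_handler = account_handler
            self.passwords = config

        @staticmethod
        def parse_config(config):
            # 'users' is a hypothetical key in this module's config section
            return dict(config.get("users", {}))

        def check_password(self, user_id, password):
            # user_id is a qualified @localpart:domain user id
            matches = self.passwords.get(user_id) == password
            return defer.succeed(matches)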

@@ -1,10 +1,18 @@

Using Postgres
--------------

Postgres version 9.4 or later is known to work.

Set up database
===============

Assuming your PostgreSQL database user is called ``postgres``, create a user
``synapse_user`` with::

   su - postgres
   createuser --pwprompt synapse_user

The PostgreSQL database used *must* have the correct encoding set, otherwise it
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::

@@ -44,8 +52,8 @@ As with Debian/Ubuntu, postgres support depends on the postgres python connector

Synapse config
==============

When you are ready to start using PostgreSQL, edit the ``database`` section in
your config file to match the following lines::

    database:
       name: psycopg2

@@ -94,9 +102,12 @@ complete, restart synapse. For instance::

    cp homeserver.db homeserver.db.snapshot
    ./synctl start

Copy the old config file into a new config file::

    cp homeserver.yaml homeserver-postgres.yaml

Edit the database section as described in the section *Synapse config* above
and with the SQLite snapshot located at ``homeserver.db.snapshot`` simply run::

    synapse_port_db --sqlite-database homeserver.db.snapshot \
        --postgres-config homeserver-postgres.yaml

@@ -112,9 +123,14 @@ script one last time, e.g. if the SQLite database is at ``homeserver.db``

run::

    synapse_port_db --sqlite-database homeserver.db \
        --postgres-config homeserver-postgres.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml``::

    ./synctl stop
    mv homeserver.yaml homeserver-old-sqlite.yaml
    mv homeserver-postgres.yaml homeserver.yaml
    ./synctl start

Synapse should now be running against PostgreSQL.

docs/privacy_policy_templates/en/1.0.html (new file)
@@ -0,0 +1,23 @@

<!doctype html>
<html lang="en">
<head>
<title>Matrix.org Privacy policy</title>
</head>
<body>
{% if has_consented %}
<p>
  Your base already belong to us.
</p>
{% else %}
<p>
  All your base are belong to us.
</p>
<form method="post" action="consent">
  <input type="hidden" name="v" value="{{version}}"/>
  <input type="hidden" name="u" value="{{user}}"/>
  <input type="hidden" name="h" value="{{userhmac}}"/>
  <input type="submit" value="Sure thing!"/>
</form>
{% endif %}
</body>
</html>

docs/privacy_policy_templates/en/success.html (new file)
@@ -0,0 +1,11 @@

<!doctype html>
<html lang="en">
<head>
<title>Matrix.org Privacy policy</title>
</head>
<body>
<p>
  Sweet.
</p>
</body>
</html>

@@ -26,28 +26,10 @@ expose the append-only log to the readers should be fairly minimal.

Architecture
------------

The Replication Protocol
~~~~~~~~~~~~~~~~~~~~~~~~

See ``tcp_replication.rst``


The Slaved DataStore

docs/server_notices.md (new file)
@@ -0,0 +1,71 @@

Server Notices
==============

'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.

They are used as part of communication of the server policies (see
[consent_tracking.md](consent_tracking.md)), however the intention is that
they may also find a use for features such as "Message of the day".

This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.

User experience
---------------

When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
`homeserver.yaml`). They will be **unable to reject** this invitation -
attempts to do so will receive an error.

Once they accept the invitation, they will see the notice message in the room
history; it will appear to have come from the 'server notices user' (see
below).

The user is prevented from sending any messages in this room by the power
levels. They also cannot leave it.

Synapse configuration
---------------------

Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
suggested, meaning the notices will come from
`@server:<your_server_name>`. Once the Server Notices user is configured, that
user id becomes a special, privileged user, so administrators should ensure
that **it is not already allocated**.

In order to support server notices, it is necessary to add some configuration
to the `homeserver.yaml` file. In particular, you should add a `server_notices`
section, which should look like this:

```yaml
server_notices:
  system_mxid_localpart: server
  system_mxid_display_name: "Server Notices"
  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
  room_name: "Server Notices"
```

The only compulsory setting is `system_mxid_localpart`, which defines the user
id of the Server Notices user, as above. `room_name` defines the name of the
room which will be created.

`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.

Sending notices
---------------

As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).

In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:

```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```

@@ -50,7 +50,7 @@ master_doc = 'index'

# General information about the project.
project = u'Synapse'
copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the

docs/tcp_replication.rst (new file)
@@ -0,0 +1,223 @@

TCP Replication
===============

Motivation
----------

Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.


Overview
--------

The protocol is based on fire and forget, line based commands. An example flow
would be (where '>' indicates master to worker and '<' worker to master flows)::

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.

Error reporting happens by either the client or server sending an `ERROR`
command, and usually the connection will be closed.


Since the protocol is a simple line-based one, it's possible to manually connect
to the server using a tool like netcat. A few things should be noted when
manually using the protocol:

* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
  be used to get all future updates. The special stream name ``ALL`` can be used
  with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
  disabled on the main process.
* The server will only time out connections that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial PING on connection and ensure at least one command every
  5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token, however if the stream
  has multiple rows to replicate per token the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.


Architecture
------------

The basic structure of the protocol is line based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the `RDATA` command is defined as::

    RDATA <stream_name> <token> <row_json>

(Note that `<row_json>` may contain spaces, but cannot contain newlines.)

Blank lines are ignored.


Keep alives
~~~~~~~~~~~

Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.

Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.

This ensures that both sides can quickly realize if the tcp connection has gone
and handle the situation appropriately.


Start up
~~~~~~~~

When a new connection is made, the server:

* Sends a ``SERVER`` command, which includes the identity of the server, allowing
  the client to detect if it's connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
  promptly.

The client:

* Sends a ``NAME`` command, allowing the server to associate a human friendly
  name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to it sends a ``REPLICATE``
  with the stream_name and token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
  expected server name.


Error handling
~~~~~~~~~~~~~~

If either side detects an error it can send an ``ERROR`` command and close the
connection.

If the client side loses the connection to the server it should reconnect,
following the steps above.


Congestion
~~~~~~~~~~

If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.


Reliability
~~~~~~~~~~~

In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.

The exception to that are the replication streams, i.e. RDATA commands, since
these include tokens which can be used to restart the stream on connection
errors.

The client should keep track of the token in the last RDATA command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending, these are *not* included on the wire::

    * connection established *
    > SERVER localhost:8823
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
    * connection closed by server *

The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.


An example of a batched set of ``RDATA`` is::

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it sees the
last ``RDATA``.


List of commands
~~~~~~~~~~~~~~~~

The list of valid commands, with which side can send it: server (S) or client (C):

SERVER (S)
   Sent at the start to identify which server the client is talking to

RDATA (S)
   A single update in a stream

POSITION (S)
   The position of the stream has been updated

ERROR (S, C)
   There was an error

PING (S, C)
   Sent periodically to ensure the connection is still alive

NAME (C)
   Sent at the start by client to inform the server who they are

REPLICATE (C)
   Asks the server to replicate a given stream

USER_SYNC (C)
   A user has started or stopped syncing

FEDERATION_ACK (C)
   Acknowledge receipt of some federation data

REMOVE_PUSHER (C)
   Inform the server a pusher should be removed

INVALIDATE_CACHE (C)
   Inform the server a cache should be invalidated

SYNC (S, C)
   Used exclusively in tests


See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
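
To give a concrete feel for the framing, here is a simplified Python sketch of
parsing a received line (illustrative only; it is not the actual parser in
``synapse/replication/tcp``)::

    import json

    def parse_line(line):
        cmd, _, content = line.rstrip("\r\n").partition(" ")
        if cmd == "RDATA":
            # RDATA <stream_name> <token> <row_json>; a token of "batch"
            # means further rows for the same token will follow
            stream_name, token, row_json = content.split(" ", 2)
            return cmd, (stream_name, token, json.loads(row_json))
        return cmd, content

    print(parse_line('RDATA events 54 ["$foo1:bar.com", null, null, null, null]'))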

@@ -50,14 +50,37 @@ You may be able to setup coturn via your package manager, or set it up manually

       pwgen -s 64 1

5. Consider your security settings. TURN lets users request a relay
   which will connect to arbitrary IP addresses and ports. At the least
   we recommend::

       # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
       no-tcp-relay

       # don't let the relay ever try to connect to private IP address ranges within your network (if any)
       # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
       denied-peer-ip=10.0.0.0-10.255.255.255
       denied-peer-ip=192.168.0.0-192.168.255.255
       denied-peer-ip=172.16.0.0-172.31.255.255

       # special case the turn server itself so that client->TURN->TURN->client flows work
       allowed-peer-ip=10.0.0.1

       # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
       user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
       total-quota=1200

   Ideally coturn should refuse to relay traffic which isn't SRTP;
   see https://github.com/matrix-org/synapse/issues/2009

6. Ensure your firewall allows traffic into the TURN server on
   the ports you've configured it to listen on (remember to allow
   both TCP and UDP TURN traffic)

7. If you've configured coturn to support TLS/DTLS, generate or
   import your private key and certificate.

8. Start the turn server::

       bin/turnserver -o

@@ -83,12 +106,19 @@ Your home server configuration file needs the following extra keys:

   to refresh credentials. The TURN REST API specification recommends
   one day (86400000).

4. "turn_allow_guests": Whether to allow guest users to use the TURN
   server. This is enabled by default, as otherwise VoIP will not
   work reliably for guests. However, it does introduce a security risk
   as it lets guests connect to arbitrary endpoints without having gone
   through a CAPTCHA or similar to register a real account.

As an example, here is the relevant section of the config file for
matrix.org::

    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
    turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
    turn_user_lifetime: 86400000
    turn_allow_guests: True

Now, restart synapse::

@@ -56,6 +56,7 @@ As a first cut, let's do #2 and have the receiver hit the API to calculate its o

API
---

```
GET /_matrix/media/r0/preview_url?url=http://wherever.com
200 OK
{

@@ -66,6 +67,7 @@ GET /_matrix/media/r0/preview_url?url=http://wherever.com

  "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
  "og:site_name" : "Twitter"
}
```

* Downloads the URL
* If HTML, just stores it in RAM and parses it for OG meta tags

docs/user_directory.md (new file)
@@ -0,0 +1,17 @@
|
|||||||
|
User Directory API Implementation
|
||||||
|
=================================
|
||||||
|
|
||||||
|
The user directory is currently maintained based on the 'visible' users
|
||||||
|
on this particular server - i.e. ones which your account shares a room with, or
|
||||||
|
who are present in a publicly viewable room present on the server.
|
||||||
|
|
||||||
|
The directory info is stored in various tables, which can (typically after
|
||||||
|
DB corruption) get stale or out of sync. If this happens, for now the
|
||||||
|
quickest solution to fix it is:
|
||||||
|
|
||||||
|
```
|
||||||
|
UPDATE user_directory_stream_pos SET stream_id = NULL;
|
||||||
|
```
|
||||||
|
|
||||||
|
and restart the synapse, which should then start a background task to
|
||||||
|
flush the current tables and regenerate the directory.
|
||||||
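On a postgres-backed synapse the same fix can be applied from a script; a minimal sketch, where the connection settings are placeholders:

```python
import psycopg2

# Placeholder connection settings; match your database config.
conn = psycopg2.connect(dbname="synapse", user="synapse_user")

with conn, conn.cursor() as cur:
    # A NULL stream position makes synapse rebuild the directory on startup.
    cur.execute("UPDATE user_directory_stream_pos SET stream_id = NULL")

conn.close()
# Now restart synapse to kick off the regeneration background task.
```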
205 docs/workers.rst
@@ -1,63 +1,90 @@
Scaling synapse via workers
----------------------------
+===========================

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

+All of the below is highly experimental and subject to change as Synapse evolves,
+but documenting it here to help folks needing highly scalable Synapses similar
+to the one running matrix.org!
+
All processes continue to share the same database instance, and as such, workers
only work with postgres based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using
postgres anyway if you care about scalability).

The workers communicate with the master synapse process via a synapse-specific
-HTTP protocol called 'replication' - analogous to MySQL or Postgres style
+TCP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

-To enable workers, you need to add a replication listener to the master synapse, e.g.::
+Configuration
+-------------
+
+To make effective use of the workers, you will need to configure an HTTP
+reverse-proxy such as nginx or haproxy, which will direct incoming requests to
+the correct worker, or to the main synapse instance. Note that this includes
+requests made to the federation port. The caveats regarding running a
+reverse-proxy on the federation port still apply (see
+https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).
+
+To enable workers, you need to add two replication listeners to the master
+synapse, e.g.::

    listeners:
+     # The TCP replication port
      - port: 9092
+       bind_address: '127.0.0.1'
+       type: replication
+
+     # The HTTP replication port
+     - port: 9093
        bind_address: '127.0.0.1'
        type: http
-       tls: false
-       x_forwarded: false
        resources:
         - names: [replication]
-       compress: false

-Under **no circumstances** should this replication API listener be exposed to the
-public internet; it currently implements no authentication whatsoever and is
-unencrypted HTTP.
+Under **no circumstances** should these replication API listeners be exposed to
+the public internet; it currently implements no authentication whatsoever and is
+unencrypted.

-You then create a set of configs for the various worker processes. These should be
-worker configuration files should be stored in a dedicated subdirectory, to allow
-synctl to manipulate them.
-
-The current available worker applications are:
-* synapse.app.pusher - handles sending push notifications to sygnal and email
-* synapse.app.synchrotron - handles /sync endpoints. can scales horizontally through multiple instances.
-* synapse.app.appservice - handles output traffic to Application Services
-* synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
-* synapse.app.media_repository - handles the media repository.
-* synapse.app.client_reader - handles client API endpoints like /publicRooms
+(Roughly, the TCP port is used for streaming data from the master to the
+workers, and the HTTP port for the workers to send data to the main
+synapse process.)
+
+You then create a set of configs for the various worker processes. These
+should be worker configuration files, and should be stored in a dedicated
+subdirectory, to allow synctl to manipulate them. An additional configuration
+for the master synapse process will need to be created because the process will
+not be started automatically. That configuration should look like this::
+
+    worker_app: synapse.app.homeserver
+    daemonize: true

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.

-You must specify the type of worker application (worker_app) and the replication
-endpoint that it's talking to on the main synapse process (worker_replication_url).
+You must specify the type of worker application (``worker_app``). The currently
+available worker applications are listed below. You must also specify the
+replication endpoints that it's talking to on the main synapse process.
+``worker_replication_host`` should specify the host of the main synapse,
+``worker_replication_port`` should point to the TCP replication listener port and
+``worker_replication_http_port`` should point to the HTTP replication port.
+
+Currently, only the ``event_creator`` worker requires specifying
+``worker_replication_http_port``.

For instance::

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
-    worker_replication_url: http://127.0.0.1:9092/_synapse/replication
+    worker_replication_host: 127.0.0.1
+    worker_replication_port: 9092
+    worker_replication_http_port: 9093

    worker_listeners:
     - type: http
@@ -71,11 +98,11 @@ For instance::
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
-plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
+plain HTTP ``/sync`` endpoint on port 8083 separately from the ``/sync`` endpoint provided
by the main synapse.

-Obviously you should configure your loadbalancer to route the /sync endpoint to
-the synchrotron instance(s) in this instance.
+Obviously you should configure your reverse-proxy to route the relevant
+endpoints to the worker (``localhost:8083`` in the above example).

Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
@@ -92,7 +119,127 @@ To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

-All of the above is highly experimental and subject to change as Synapse evolves,
-but documenting it here to help folks needing highly scalable Synapses similar
-to the one running matrix.org!
+
+Available worker applications
+-----------------------------
+
+``synapse.app.pusher``
+~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending push notifications to sygnal and email. Doesn't handle any
+REST endpoints itself, but you should set ``start_pushers: False`` in the
+shared configuration file to stop the main synapse sending these notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.synchrotron``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The synchrotron handles ``sync`` requests from clients. In particular, it can
+handle REST endpoints matching the following regular expressions::
+
+    ^/_matrix/client/(v2_alpha|r0)/sync$
+    ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
+    ^/_matrix/client/(api/v1|r0)/initialSync$
+    ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$
+
+The above endpoints should all be routed to the synchrotron worker by the
+reverse-proxy configuration.
+
+It is possible to run multiple instances of the synchrotron to scale
+horizontally. In this case the reverse-proxy should be configured to
+load-balance across the instances, though it will be more efficient if all
+requests from a particular user are routed to a single instance. Extracting
+a userid from the access token is currently left as an exercise for the reader.
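Since the paragraph above leaves user-affinity routing as an exercise, here is one possible sketch. It assumes the access tokens in play are synapse-style macaroons carrying a ``user_id = ...`` caveat; opaque (non-macaroon) tokens simply yield no affinity key. This is illustrative only, not something synapse ships:

```python
import pymacaroons

def user_id_from_token(access_token):
    """Best-effort extraction of the user_id caveat from a macaroon
    access token, for use as a load-balancing affinity key.

    Returns None if the token isn't a macaroon or has no user_id caveat.
    """
    try:
        macaroon = pymacaroons.Macaroon.deserialize(access_token)
    except Exception:  # opaque (non-macaroon) token
        return None
    for caveat in macaroon.caveats:
        if caveat.caveat_id.startswith("user_id = "):
            return caveat.caveat_id[len("user_id = "):]
    return None

# e.g. hash(user_id_from_token(token)) % n_synchrotrons picks an instance
```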
+``synapse.app.appservice``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending output traffic to Application Services. Doesn't handle any
+REST endpoints itself, but you should set ``notify_appservices: False`` in the
+shared configuration file to stop the main synapse sending these notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.federation_reader``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles a subset of federation endpoints. In particular, it can handle REST
+endpoints matching the following regular expressions::
+
+    ^/_matrix/federation/v1/event/
+    ^/_matrix/federation/v1/state/
+    ^/_matrix/federation/v1/state_ids/
+    ^/_matrix/federation/v1/backfill/
+    ^/_matrix/federation/v1/get_missing_events/
+    ^/_matrix/federation/v1/publicRooms
+
+The above endpoints should all be routed to the federation_reader worker by the
+reverse-proxy configuration.
+
+``synapse.app.federation_sender``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending federation traffic to other servers. Doesn't handle any
+REST endpoints itself, but you should set ``send_federation: False`` in the
+shared configuration file to stop the main synapse sending this traffic.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.media_repository``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles the media repository. It can handle all endpoints starting with::
+
+    /_matrix/media/
+
+You should also set ``enable_media_repo: False`` in the shared configuration
+file to stop the main synapse running background jobs related to managing the
+media repository.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.client_reader``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles client API endpoints. It can handle REST endpoints matching the
+following regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
+
+``synapse.app.user_dir``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles searches in the user directory. It can handle REST endpoints matching
+the following regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
+
+``synapse.app.frontend_proxy``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Proxies some frequently-requested client endpoints to add caching and remove
+load from the main synapse. It can handle REST endpoints matching the following
+regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload
+
+It will proxy any requests it cannot handle to the main synapse instance. It
+must therefore be configured with the location of the main instance, via
+the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
+file. For example::
+
+    worker_main_http_uri: http://127.0.0.1:8008
+
+``synapse.app.event_creator``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles some event creation. It can handle REST endpoints matching::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
+    ^/_matrix/client/(api/v1|r0|unstable)/join/
+
+It will create events locally and then send them on to the main synapse
+instance to be persisted and handled.
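To make the routing concrete, a toy sketch of a dispatch table built from a few of the regexes above. The port numbers are invented for the example, and synapse itself ships nothing like this; a real deployment would express the same mapping in nginx or haproxy config:

```python
import re

# Hypothetical worker port assignments for this example.
ROUTES = [
    (re.compile(r"^/_matrix/client/(v2_alpha|r0)/sync$"), 8083),              # synchrotron
    (re.compile(r"^/_matrix/federation/v1/(event|state|state_ids)/"), 8084),  # federation_reader
    (re.compile(r"^/_matrix/media/"), 8085),                                  # media_repository
    (re.compile(r"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send"), 8086),  # event_creator
]

def pick_backend(path, main_port=8008):
    """Return the port of the worker that should serve this path,
    falling back to the main synapse process."""
    for pattern, port in ROUTES:
        if pattern.search(path):
            return port
    return main_port

assert pick_backend("/_matrix/client/r0/sync") == 8083
assert pick_backend("/_matrix/client/r0/login") == 8008
```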
@@ -17,6 +17,7 @@ export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --haproxy \
@@ -15,5 +15,6 @@ export SYNAPSE_CACHE_FACTOR=1
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
@@ -14,4 +14,5 @@ export SYNAPSE_CACHE_FACTOR=1
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
@@ -12,4 +12,5 @@ export SYNAPSE_CACHE_FACTOR=1
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
@@ -1,5 +1,7 @@
#! /bin/bash

+set -eux
+
cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox
@@ -14,7 +16,20 @@ fi
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin
-$TOX_BIN/pip install setuptools
+
+# cryptography 2.2 requires setuptools >= 18.5.
+#
+# older versions of virtualenv (?) give us a virtualenv with the same version
+# of setuptools as is installed on the system python (and tox runs virtualenv
+# under python3, so we get the version of setuptools that is installed on that).
+#
+# anyway, make sure that we have a recent enough setuptools.
+$TOX_BIN/pip install 'setuptools>=18.5'
+
+# we also need a semi-recent version of pip, because old ones fail to install
+# the "enum34" dependency of cryptography.
+$TOX_BIN/pip install 'pip>=10'

{ python synapse/python_dependencies.py
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install
125 scripts-dev/federation_client.py (Normal file → Executable file)
@@ -1,10 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
import nacl.signing
import json
import base64
import requests
import sys
import srvlookup
+import yaml

def encode_base64(input_bytes):
    """Encode bytes as a base64 string without any padding."""
@@ -103,15 +123,25 @@ def lookup(destination, path):
    except:
        return "https://%s:%d%s" % (destination, 8448, path)

-def get_json(origin_name, origin_key, destination, path):
-    request_json = {
-        "method": "GET",
+def request_json(method, origin_name, origin_key, destination, path, content):
+    if method is None:
+        if content is None:
+            method = "GET"
+        else:
+            method = "POST"
+
+    json_to_sign = {
+        "method": method,
        "uri": path,
        "origin": origin_name,
        "destination": destination,
    }

-    signed_json = sign_json(request_json, origin_key, origin_name)
+    if content is not None:
+        json_to_sign["content"] = json.loads(content)
+
+    signed_json = sign_json(json_to_sign, origin_key, origin_name)

    authorization_headers = []

@@ -120,30 +150,97 @@ def get_json(origin_name, origin_key, destination, path):
        origin_name, key, sig,
    )
    authorization_headers.append(bytes(header))
-    sys.stderr.write(header)
-    sys.stderr.write("\n")
+    print ("Authorization: %s" % header, file=sys.stderr)

-    result = requests.get(
-        lookup(destination, path),
+    dest = lookup(destination, path)
+    print ("Requesting %s" % dest, file=sys.stderr)
+
+    result = requests.request(
+        method=method,
+        url=dest,
        headers={"Authorization": authorization_headers[0]},
        verify=False,
+        data=content,
    )
    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
    return result.json()


def main():
-    origin_name, keyfile, destination, path = sys.argv[1:]
+    parser = argparse.ArgumentParser(
+        description=
+            "Signs and sends a federation request to a matrix homeserver",
+    )
+
+    parser.add_argument(
+        "-N", "--server-name",
+        help="Name to give as the local homeserver. If unspecified, will be "
+             "read from the config file.",
+    )
+
+    parser.add_argument(
+        "-k", "--signing-key-path",
+        help="Path to the file containing the private ed25519 key to sign the "
+             "request with.",
+    )
+
+    parser.add_argument(
+        "-c", "--config",
+        default="homeserver.yaml",
+        help="Path to server config file. Ignored if --server-name and "
+             "--signing-key-path are both given.",
+    )
+
+    parser.add_argument(
+        "-d", "--destination",
+        default="matrix.org",
+        help="name of the remote homeserver. We will do SRV lookups and "
+             "connect appropriately.",
+    )
+
+    parser.add_argument(
+        "-X", "--method",
+        help="HTTP method to use for the request. Defaults to GET if --body is "
+             "unspecified, POST if it is."
+    )
+
+    parser.add_argument(
+        "--body",
+        help="Data to send as the body of the HTTP request"
+    )
+
+    parser.add_argument(
+        "path",
+        help="request path. We will add '/_matrix/federation/v1/' to this."
+    )
+
+    args = parser.parse_args()
+
+    if not args.server_name or not args.signing_key_path:
+        read_args_from_config(args)

-    with open(keyfile) as f:
+    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]

-    result = get_json(
-        origin_name, key, destination, "/_matrix/federation/v1/" + path
+    result = request_json(
+        args.method,
+        args.server_name, key, args.destination,
+        "/_matrix/federation/v1/" + args.path,
+        content=args.body,
    )

    json.dump(result, sys.stdout)
-    print ""
+    print ("")
+
+
+def read_args_from_config(args):
+    with open(args.config, 'r') as fh:
+        config = yaml.safe_load(fh)
+        if not args.server_name:
+            args.server_name = config['server_name']
+        if not args.signing_key_path:
+            args.signing_key_path = config['signing_key_path']


if __name__ == "__main__":
    main()
@@ -6,19 +6,52 @@

## Do not run it lightly.

+set -e
+
+if [ "$1" == "-h" ] || [ "$1" == "" ]; then
+  echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
+  echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
+  echo "or"
+  echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
+  exit
+fi
+
ROOMID="$1"

-sqlite3 homeserver.db <<EOF
-DELETE FROM context_depth WHERE context = '$ROOMID';
-DELETE FROM current_state WHERE context = '$ROOMID';
-DELETE FROM feedback WHERE room_id = '$ROOMID';
-DELETE FROM messages WHERE room_id = '$ROOMID';
-DELETE FROM pdu_backward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdu_edges WHERE context = '$ROOMID';
-DELETE FROM pdu_forward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdus WHERE context = '$ROOMID';
-DELETE FROM room_data WHERE room_id = '$ROOMID';
+cat <<EOF
+DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_edges WHERE room_id = '$ROOMID';
+DELETE FROM room_depth WHERE room_id = '$ROOMID';
+DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM events WHERE room_id = '$ROOMID';
+DELETE FROM event_json WHERE room_id = '$ROOMID';
+DELETE FROM state_events WHERE room_id = '$ROOMID';
+DELETE FROM current_state_events WHERE room_id = '$ROOMID';
DELETE FROM room_memberships WHERE room_id = '$ROOMID';
+DELETE FROM feedback WHERE room_id = '$ROOMID';
+DELETE FROM topics WHERE room_id = '$ROOMID';
+DELETE FROM room_names WHERE room_id = '$ROOMID';
DELETE FROM rooms WHERE room_id = '$ROOMID';
-DELETE FROM state_pdus WHERE context = '$ROOMID';
+DELETE FROM room_hosts WHERE room_id = '$ROOMID';
+DELETE FROM room_aliases WHERE room_id = '$ROOMID';
+DELETE FROM state_groups WHERE room_id = '$ROOMID';
+DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
+DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
+DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
+DELETE FROM event_search WHERE room_id = '$ROOMID';
+DELETE FROM guest_access WHERE room_id = '$ROOMID';
+DELETE FROM history_visibility WHERE room_id = '$ROOMID';
+DELETE FROM room_tags WHERE room_id = '$ROOMID';
+DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
+DELETE FROM room_account_data WHERE room_id = '$ROOMID';
+DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
+DELETE FROM local_invites WHERE room_id = '$ROOMID';
+DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
+DELETE FROM event_reports WHERE room_id = '$ROOMID';
+DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
+DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
+DELETE FROM event_auth WHERE room_id = '$ROOMID';
+DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
+VACUUM;
EOF
133 scripts/move_remote_media_to_new_store.py (new file, executable)
@@ -0,0 +1,133 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Moves a list of remote media from one media store to another.

The input should be a list of media files to be moved, one per line. Each line
should be formatted::

    <origin server>|<file id>

This can be extracted from postgres with::

    psql --tuples-only -A -c "select media_origin, filesystem_id from
        matrix.remote_media_cache where ..."

To use, pipe the above into::

    PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""

from __future__ import print_function

import argparse
import logging

import sys

import os

import shutil

from synapse.rest.media.v1.filepath import MediaFilePaths

logger = logging.getLogger()


def main(src_repo, dest_repo):
    src_paths = MediaFilePaths(src_repo)
    dest_paths = MediaFilePaths(dest_repo)
    for line in sys.stdin:
        line = line.strip()
        parts = line.split('|')
        if len(parts) != 2:
            print("Unable to parse input line %s" % line, file=sys.stderr)
            exit(1)

        move_media(parts[0], parts[1], src_paths, dest_paths)


def move_media(origin_server, file_id, src_paths, dest_paths):
    """Move the given file, and any thumbnails, to the dest repo

    Args:
        origin_server (str):
        file_id (str):
        src_paths (MediaFilePaths):
        dest_paths (MediaFilePaths):
    """
    logger.info("%s/%s", origin_server, file_id)

    # check that the original exists
    original_file = src_paths.remote_media_filepath(origin_server, file_id)
    if not os.path.exists(original_file):
        logger.warn(
            "Original for %s/%s (%s) does not exist",
            origin_server, file_id, original_file,
        )
    else:
        mkdir_and_move(
            original_file,
            dest_paths.remote_media_filepath(origin_server, file_id),
        )

    # now look for thumbnails
    original_thumb_dir = src_paths.remote_media_thumbnail_dir(
        origin_server, file_id,
    )
    if not os.path.exists(original_thumb_dir):
        return

    mkdir_and_move(
        original_thumb_dir,
        dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
    )


def mkdir_and_move(original_file, dest_file):
    dirname = os.path.dirname(dest_file)
    if not os.path.exists(dirname):
        logger.debug("mkdir %s", dirname)
        os.makedirs(dirname)
    logger.debug("mv %s %s", original_file, dest_file)
    shutil.move(original_file, dest_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-v", action='store_true', help='enable debug logging')
    parser.add_argument(
        "src_repo",
        help="Path to source content repo",
    )
    parser.add_argument(
        "dest_repo",
        help="Path to dest content repo",
    )
    args = parser.parse_args()

    logging_config = {
        "level": logging.DEBUG if args.v else logging.INFO,
        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    }
    logging.basicConfig(**logging_config)

    main(args.src_repo, args.dest_repo)
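If you prefer to generate that input list programmatically rather than via psql, a rough sketch with psycopg2 follows; the connection settings, unqualified table name and WHERE clause are placeholders to adapt to your schema:

```python
import psycopg2

# Placeholder connection settings; match your database config.
conn = psycopg2.connect(dbname="synapse", user="synapse_user")

with conn, conn.cursor() as cur:
    # Select the media to move; the timestamp cutoff is just an example.
    cur.execute(
        "SELECT media_origin, filesystem_id FROM remote_media_cache "
        "WHERE last_access_ts < %s",
        (1514764800000,),
    )
    for origin, file_id in cur:
        # One "<origin server>|<file id>" line per file, as the script expects.
        print("%s|%s" % (origin, file_id))
```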
@@ -1,6 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,6 +30,8 @@ import time
import traceback
import yaml

+from six import string_types
+

logger = logging.getLogger("synapse_port_db")

@@ -41,6 +44,15 @@ BOOLEAN_COLUMNS = {
    "presence_stream": ["currently_active"],
    "public_room_list_stream": ["visibility"],
    "device_lists_outbound_pokes": ["sent"],
+    "users_who_share_rooms": ["share_private"],
+    "groups": ["is_public"],
+    "group_rooms": ["is_public"],
+    "group_users": ["is_public", "is_admin"],
+    "group_summary_rooms": ["is_public"],
+    "group_room_categories": ["is_public"],
+    "group_summary_users": ["is_public"],
+    "group_roles": ["is_public"],
+    "local_group_membership": ["is_publicised", "is_admin"],
}


@@ -111,6 +123,7 @@ class Store(object):

    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
+    _simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]

    def runInteraction(self, desc, func, *args, **kwargs):
        def r(conn):
@@ -121,7 +134,7 @@ class Store(object):
            try:
                txn = conn.cursor()
                return func(
-                    LoggingTransaction(txn, desc, self.database_engine, []),
+                    LoggingTransaction(txn, desc, self.database_engine, [], []),
                    *args, **kwargs
                )
            except self.database_engine.module.DatabaseError as e:
@@ -240,6 +253,12 @@ class Porter(object):
    @defer.inlineCallbacks
    def handle_table(self, table, postgres_size, table_size, forward_chunk,
                     backward_chunk):
+        logger.info(
+            "Table %s: %i/%i (rows %i-%i) already ported",
+            table, postgres_size, table_size,
+            backward_chunk+1, forward_chunk-1,
+        )
+
        if not table_size:
            return

@@ -251,6 +270,25 @@ class Porter(object):
            )
            return

+        if table in (
+            "user_directory", "user_directory_search", "users_who_share_rooms",
+            "users_in_pubic_room",
+        ):
+            # We don't port these tables, as they're a faff and we can regenerate
+            # them anyway.
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
+        if table == "user_directory_stream_pos":
+            # We need to make sure there is a single row, `(X, null), as that is
+            # what synapse expects to be there.
+            yield self.postgres_store._simple_insert(
+                table=table,
+                values={"stream_id": None},
+            )
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
        forward_select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
@@ -298,7 +336,7 @@ class Porter(object):
            backward_chunk = min(row[0] for row in brows) - 1

        rows = frows + brows
-        self._convert_rows(table, headers, rows)
+        rows = self._convert_rows(table, headers, rows)

        def insert(txn):
            self.postgres_store.insert_many_txn(
@@ -356,10 +394,13 @@ class Porter(object):
            " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
        )

-        rows_dict = [
-            dict(zip(headers, row))
-            for row in rows
-        ]
+        rows_dict = []
+        for row in rows:
+            d = dict(zip(headers, row))
+            if "\0" in d['value']:
+                logger.warn('dropping search row %s', d)
+            else:
+                rows_dict.append(d)

        txn.executemany(sql, [
            (
@@ -435,33 +476,10 @@ class Porter(object):
        self.progress.set_state("Preparing PostgreSQL")
        self.setup_db(postgres_config, postgres_engine)

-        # Step 2. Get tables.
-        self.progress.set_state("Fetching tables")
-        sqlite_tables = yield self.sqlite_store._simple_select_onecol(
-            table="sqlite_master",
-            keyvalues={
-                "type": "table",
-            },
-            retcol="name",
-        )
-
-        postgres_tables = yield self.postgres_store._simple_select_onecol(
-            table="information_schema.tables",
-            keyvalues={
-                "table_schema": "public",
-            },
-            retcol="distinct table_name",
-        )
-
-        tables = set(sqlite_tables) & set(postgres_tables)
-
-        self.progress.set_state("Creating tables")
-
-        logger.info("Found %d tables", len(tables))
+        self.progress.set_state("Creating port tables")

        def create_port_table(txn):
            txn.execute(
-                "CREATE TABLE port_from_sqlite3 ("
+                "CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
                " table_name varchar(100) NOT NULL UNIQUE,"
                " forward_rowid bigint NOT NULL,"
                " backward_rowid bigint NOT NULL"
@@ -487,18 +505,33 @@ class Porter(object):
                "alter_table", alter_table
            )
        except Exception as e:
-            logger.info("Failed to create port table: %s", e)
+            pass

-        try:
-            yield self.postgres_store.runInteraction(
-                "create_port_table", create_port_table
-            )
-        except Exception as e:
-            logger.info("Failed to create port table: %s", e)
+        yield self.postgres_store.runInteraction(
+            "create_port_table", create_port_table
+        )

-        self.progress.set_state("Setting up")
+        # Step 2. Get tables.
+        self.progress.set_state("Fetching tables")
+        sqlite_tables = yield self.sqlite_store._simple_select_onecol(
+            table="sqlite_master",
+            keyvalues={
+                "type": "table",
+            },
+            retcol="name",
+        )

-        # Set up tables.
+        postgres_tables = yield self.postgres_store._simple_select_onecol(
+            table="information_schema.tables",
+            keyvalues={},
+            retcol="distinct table_name",
+        )
+
+        tables = set(sqlite_tables) & set(postgres_tables)
+        logger.info("Found %d tables", len(tables))
+
+        # Step 3. Figure out what still needs copying
+        self.progress.set_state("Checking on port progress")
        setup_res = yield defer.gatherResults(
            [
                self.setup_table(table)
@@ -509,7 +542,8 @@ class Porter(object):
            consumeErrors=True,
        )

-        # Process tables.
+        # Step 4. Do the copying.
+        self.progress.set_state("Copying to postgres")
        yield defer.gatherResults(
            [
                self.handle_table(*res)
@@ -518,6 +552,9 @@ class Porter(object):
            consumeErrors=True,
        )

+        # Step 5. Do final post-processing
+        yield self._setup_state_group_id_seq()
+
        self.progress.done()
    except:
        global end_error_exec_info
@@ -533,17 +570,29 @@ class Porter(object):
            i for i, h in enumerate(headers) if h in bool_col_names
        ]

+        class BadValueException(Exception):
+            pass
+
        def conv(j, col):
            if j in bool_cols:
                return bool(col)
+            elif isinstance(col, string_types) and "\0" in col:
+                logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
+                raise BadValueException()
            return col

+        outrows = []
        for i, row in enumerate(rows):
-            rows[i] = tuple(
-                conv(j, col)
-                for j, col in enumerate(row)
-                if j > 0
-            )
+            try:
+                outrows.append(tuple(
+                    conv(j, col)
+                    for j, col in enumerate(row)
+                    if j > 0
+                ))
+            except BadValueException:
+                pass
+
+        return outrows

    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
@@ -571,7 +620,7 @@ class Porter(object):
            "select", r,
        )

-        self._convert_rows("sent_transactions", headers, rows)
+        rows = self._convert_rows("sent_transactions", headers, rows)

        inserted_rows = len(rows)
        if inserted_rows:
@@ -665,6 +714,16 @@ class Porter(object):

        defer.returnValue((done, remaining + done))

+    def _setup_state_group_id_seq(self):
+        def r(txn):
+            txn.execute("SELECT MAX(id) FROM state_groups")
+            next_id = txn.fetchone()[0]+1
+            txn.execute(
+                "ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
+                (next_id,),
+            )
+        return self.postgres_store.runInteraction("setup_state_group_id_seq", r)


##############################################
###### The following is simply UI stuff ######
45 scripts/sync_room_to_group.pl (new file, executable)
@@ -0,0 +1,45 @@
#!/usr/bin/env perl

use strict;
use warnings;

use JSON::XS;
use LWP::UserAgent;
use URI::Escape;

if (@ARGV < 4) {
    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
}

my ($hs, $access_token, $room_id, $group_id) = @ARGV;
my $ua = LWP::UserAgent->new();
$ua->timeout(10);

if ($room_id =~ /^#/) {
    $room_id = uri_escape($room_id);
    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
}

my $room_users  = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
my $group_users = [
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
];

die "refusing to sync from empty room" unless (@$room_users);
die "refusing to sync to empty group" unless (@$group_users);

my $diff = {};
foreach my $user (@$room_users) { $diff->{$user}++ }
foreach my $user (@$group_users) { $diff->{$user}-- }

foreach my $user (keys %$diff) {
    if ($diff->{$user} == 1) {
        warn "inviting $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
    elsif ($diff->{$user} == -1) {
        warn "removing $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
}
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""

-__version__ = "0.19.3"
+__version__ = "0.30.0"
@@ -23,7 +23,8 @@ from synapse import event_auth
|
|||||||
from synapse.api.constants import EventTypes, Membership, JoinRules
|
from synapse.api.constants import EventTypes, Membership, JoinRules
|
||||||
from synapse.api.errors import AuthError, Codes
|
from synapse.api.errors import AuthError, Codes
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
from synapse.util import logcontext
|
from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR
|
||||||
|
from synapse.util.caches.lrucache import LruCache
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -39,6 +40,10 @@ AuthEventTypes = (
|
|||||||
GUEST_DEVICE_ID = "guest_device"
|
GUEST_DEVICE_ID = "guest_device"
|
||||||
|
|
||||||
|
|
||||||
|
class _InvalidMacaroonException(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class Auth(object):
|
class Auth(object):
|
||||||
"""
|
"""
|
||||||
FIXME: This class contains a mix of functions for authenticating users
|
FIXME: This class contains a mix of functions for authenticating users
|
||||||
@@ -51,6 +56,9 @@ class Auth(object):
|
|||||||
self.state = hs.get_state_handler()
|
self.state = hs.get_state_handler()
|
||||||
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
|
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
|
||||||
|
|
||||||
|
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
|
||||||
|
register_cache("token_cache", self.token_cache)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def check_from_context(self, event, context, do_sig_check=True):
|
def check_from_context(self, event, context, do_sig_check=True):
|
||||||
auth_events_ids = yield self.compute_auth_events(
|
auth_events_ids = yield self.compute_auth_events(
|
||||||
@@ -144,17 +152,8 @@ class Auth(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def check_host_in_room(self, room_id, host):
|
def check_host_in_room(self, room_id, host):
|
||||||
with Measure(self.clock, "check_host_in_room"):
|
with Measure(self.clock, "check_host_in_room"):
|
||||||
latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
|
latest_event_ids = yield self.store.is_host_joined(room_id, host)
|
||||||
|
defer.returnValue(latest_event_ids)
|
||||||
logger.debug("calling resolve_state_groups from check_host_in_room")
|
|
||||||
entry = yield self.state.resolve_state_groups(
|
|
||||||
room_id, latest_event_ids
|
|
||||||
)
|
|
||||||
|
|
||||||
ret = yield self.store.is_host_joined(
|
|
||||||
room_id, host, entry.state_group, entry.state
|
|
||||||
)
|
|
||||||
defer.returnValue(ret)
|
|
||||||
|
|
||||||
def _check_joined_room(self, member, user_id, room_id):
|
def _check_joined_room(self, member, user_id, room_id):
|
||||||
if not member or member.membership != Membership.JOIN:
|
if not member or member.membership != Membership.JOIN:
|
||||||
@@ -205,12 +204,12 @@ class Auth(object):
|
|||||||
|
|
||||||
ip_addr = self.hs.get_ip_from_request(request)
|
ip_addr = self.hs.get_ip_from_request(request)
|
||||||
user_agent = request.requestHeaders.getRawHeaders(
|
user_agent = request.requestHeaders.getRawHeaders(
|
||||||
"User-Agent",
|
b"User-Agent",
|
||||||
default=[""]
|
default=[b""]
|
||||||
)[0]
|
)[0]
|
||||||
if user and access_token and ip_addr:
|
if user and access_token and ip_addr:
|
||||||
logcontext.preserve_fn(self.store.insert_client_ip)(
|
self.store.insert_client_ip(
|
||||||
user=user,
|
user_id=user.to_string(),
|
||||||
access_token=access_token,
|
access_token=access_token,
|
||||||
ip=ip_addr,
|
ip=ip_addr,
|
||||||
user_agent=user_agent,
|
user_agent=user_agent,
|
||||||
@@ -271,13 +270,17 @@ class Auth(object):
|
|||||||
rights (str): The operation being performed; the access token must
|
rights (str): The operation being performed; the access token must
|
||||||
allow this.
|
allow this.
|
||||||
Returns:
|
Returns:
|
||||||
dict : dict that includes the user and the ID of their access token.
|
Deferred[dict]: dict that includes:
|
||||||
|
`user` (UserID)
|
||||||
|
`is_guest` (bool)
|
||||||
|
`token_id` (int|None): access token id. May be None if guest
|
||||||
|
`device_id` (str|None): device corresponding to access token
|
||||||
Raises:
|
Raises:
|
||||||
AuthError if no user by that token exists or the token is invalid.
|
AuthError if no user by that token exists or the token is invalid.
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
macaroon = pymacaroons.Macaroon.deserialize(token)
|
user_id, guest = self._parse_and_validate_macaroon(token, rights)
|
||||||
except Exception: # deserialize can throw more-or-less anything
|
except _InvalidMacaroonException:
|
||||||
# doesn't look like a macaroon: treat it as an opaque token which
|
 # doesn't look like a macaroon: treat it as an opaque token which
 # must be in the database.
 # TODO: it would be nice to get rid of this, but apparently some
@@ -286,19 +289,8 @@ class Auth(object):
             defer.returnValue(r)

         try:
-            user_id = self.get_user_id_from_macaroon(macaroon)
             user = UserID.from_string(user_id)

-            self.validate_macaroon(
-                macaroon, rights, self.hs.config.expire_access_token,
-                user_id=user_id,
-            )
-
-            guest = False
-            for caveat in macaroon.caveats:
-                if caveat.caveat_id == "guest = true":
-                    guest = True
-
             if guest:
                 # Guest access tokens are not stored in the database (there can
                 # only be one access token per guest, anyway).
@@ -370,6 +362,55 @@ class Auth(object):
                 errcode=Codes.UNKNOWN_TOKEN
             )

+    def _parse_and_validate_macaroon(self, token, rights="access"):
+        """Takes a macaroon and tries to parse and validate it. This is cached
+        if and only if rights == access and there isn't an expiry.
+
+        On invalid macaroon raises _InvalidMacaroonException
+
+        Returns:
+            (user_id, is_guest)
+        """
+        if rights == "access":
+            cached = self.token_cache.get(token, None)
+            if cached:
+                return cached
+
+        try:
+            macaroon = pymacaroons.Macaroon.deserialize(token)
+        except Exception:  # deserialize can throw more-or-less anything
+            # doesn't look like a macaroon: treat it as an opaque token which
+            # must be in the database.
+            # TODO: it would be nice to get rid of this, but apparently some
+            # people use access tokens which aren't macaroons
+            raise _InvalidMacaroonException()
+
+        try:
+            user_id = self.get_user_id_from_macaroon(macaroon)
+
+            has_expiry = False
+            guest = False
+            for caveat in macaroon.caveats:
+                if caveat.caveat_id.startswith("time "):
+                    has_expiry = True
+                elif caveat.caveat_id == "guest = true":
+                    guest = True
+
+            self.validate_macaroon(
+                macaroon, rights, self.hs.config.expire_access_token,
+                user_id=user_id,
+            )
+        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
+                errcode=Codes.UNKNOWN_TOKEN
+            )
+
+        if not has_expiry and rights == "access":
+            self.token_cache[token] = (user_id, guest)
+
+        return user_id, guest
+
     def get_user_id_from_macaroon(self, macaroon):
         """Retrieve the user_id given by the caveats on the macaroon.

@@ -482,6 +523,14 @@ class Auth(object):
         )

     def is_server_admin(self, user):
+        """ Check if the given user is a local server admin.
+
+        Args:
+            user (str): mxid of user to check
+
+        Returns:
+            bool: True if the user is an admin
+        """
         return self.store.is_server_admin(user)

     @defer.inlineCallbacks
@@ -623,7 +672,7 @@ def has_access_token(request):
         bool: False if no access_token was given, True otherwise.
     """
     query_params = request.args.get("access_token")
-    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
+    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
     return bool(query_params) or bool(auth_headers)


@@ -643,8 +692,8 @@ def get_access_token_from_request(request, token_not_found_http_status=401):
        AuthError: If there isn't an access_token in the request.
    """

-    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
-    query_params = request.args.get("access_token")
+    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+    query_params = request.args.get(b"access_token")
    if auth_headers:
        # Try the get the access_token from a "Authorization: Bearer"
        # header
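
Editor's note on the caching rule introduced above: _parse_and_validate_macaroon memoizes the (user_id, is_guest) pair in token_cache, but only for "access" rights and only when the macaroon carries no "time " (expiry) caveat, since a cached entry would otherwise outlive its expiry. A minimal, self-contained sketch of that rule follows; the key, location and caveat values are made-up examples rather than Synapse's real ones, and the pymacaroons Verifier step of the real code is elided.

    import pymacaroons

    token_cache = {}

    def parse_and_validate(token, rights="access"):
        # Serve from cache only for plain access tokens.
        if rights == "access" and token in token_cache:
            return token_cache[token]

        macaroon = pymacaroons.Macaroon.deserialize(token)

        user_id, guest, has_expiry = None, False, False
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith("user_id = "):
                user_id = caveat.caveat_id[len("user_id = "):]
            elif caveat.caveat_id.startswith("time "):
                has_expiry = True  # expiring tokens are never cached
            elif caveat.caveat_id == "guest = true":
                guest = True

        # (the real code validates the macaroon against its secret key here)
        if not has_expiry and rights == "access":
            token_cache[token] = (user_id, guest)
        return user_id, guest

    m = pymacaroons.Macaroon(location="example.test", identifier="key1", key="s3cret")
    m.add_first_party_caveat("user_id = @alice:example.test")
    m.add_first_party_caveat("guest = true")
    print(parse_and_validate(m.serialize()))  # ('@alice:example.test', True)
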

synapse/api/constants.py
@@ -16,6 +16,9 @@

 """Contains constants from the specification."""

+# the "depth" field on events is limited to 2**63 - 1
+MAX_DEPTH = 2**63 - 1
+

 class Membership(object):

synapse/api/errors.py
@@ -15,9 +15,12 @@

 """Contains exceptions and error codes."""

-import json
 import logging

+import simplejson as json
+from six import iteritems
+from six.moves import http_client
+
 logger = logging.getLogger(__name__)


@@ -46,8 +49,11 @@ class Codes(object):
     THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
     THREEPID_IN_USE = "M_THREEPID_IN_USE"
     THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
+    THREEPID_DENIED = "M_THREEPID_DENIED"
     INVALID_USERNAME = "M_INVALID_USERNAME"
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
+    CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
+    CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"


 class CodeMessageException(RuntimeError):
@@ -66,6 +72,17 @@ class CodeMessageException(RuntimeError):
         return cs_error(self.msg)


+class MatrixCodeMessageException(CodeMessageException):
+    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
+
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
+    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
+        super(MatrixCodeMessageException, self).__init__(code, msg)
+        self.errcode = errcode
+
+
 class SynapseError(CodeMessageException):
     """A base exception type for matrix errors which have an errcode and error
     message (as well as an HTTP status code).
@@ -124,11 +141,79 @@ class SynapseError(CodeMessageException):
         return res


+class ConsentNotGivenError(SynapseError):
+    """The error returned to the client when the user has not consented to the
+    privacy policy.
+    """
+    def __init__(self, msg, consent_uri):
+        """Constructs a ConsentNotGivenError
+
+        Args:
+            msg (str): The human-readable error message
+            consent_url (str): The URL where the user can give their consent
+        """
+        super(ConsentNotGivenError, self).__init__(
+            code=http_client.FORBIDDEN,
+            msg=msg,
+            errcode=Codes.CONSENT_NOT_GIVEN
+        )
+        self._consent_uri = consent_uri
+
+    def error_dict(self):
+        return cs_error(
+            self.msg,
+            self.errcode,
+            consent_uri=self._consent_uri
+        )
+
+
 class RegistrationError(SynapseError):
     """An error raised when a registration event fails."""
     pass


+class FederationDeniedError(SynapseError):
+    """An error raised when the server tries to federate with a server which
+    is not on its federation whitelist.
+
+    Attributes:
+        destination (str): The destination which has been denied
+    """
+
+    def __init__(self, destination):
+        """Raised by federation client or server to indicate that we are
+        are deliberately not attempting to contact a given server because it is
+        not on our federation whitelist.
+
+        Args:
+            destination (str): the domain in question
+        """
+
+        self.destination = destination
+
+        super(FederationDeniedError, self).__init__(
+            code=403,
+            msg="Federation denied with %s." % (self.destination,),
+            errcode=Codes.FORBIDDEN,
+        )
+
+
+class InteractiveAuthIncompleteError(Exception):
+    """An error raised when UI auth is not yet complete
+
+    (This indicates we should return a 401 with 'result' as the body)
+
+    Attributes:
+        result (dict): the server response to the request, which should be
+            passed back to the client
+    """
+    def __init__(self, result):
+        super(InteractiveAuthIncompleteError, self).__init__(
+            "Interactive auth not yet complete",
+        )
+        self.result = result
+
+
 class UnrecognizedRequestError(SynapseError):
     """An error indicating we don't understand the request you're trying to make"""
     def __init__(self, *args, **kwargs):
@@ -236,13 +321,13 @@ def cs_error(msg, code=Codes.UNKNOWN, **kwargs):

     Args:
         msg (str): The error message.
-        code (int): The error code.
+        code (str): The error code.
         kwargs : Additional keys to add to the response.
     Returns:
         A dict representing the error response JSON.
     """
     err = {"error": msg, "errcode": code}
-    for key, value in kwargs.iteritems():
+    for key, value in iteritems(kwargs):
         err[key] = value
     return err
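
For orientation, this is the response shape cs_error produces after the six migration above. A standalone sketch; the message and URI are invented examples:

    from six import iteritems

    def cs_error(msg, code="M_UNKNOWN", **kwargs):
        # iteritems() works on both Python 2 and Python 3 dicts, which is the
        # point of the kwargs.iteritems() -> iteritems(kwargs) change above.
        err = {"error": msg, "errcode": code}
        for key, value in iteritems(kwargs):
            err[key] = value
        return err

    print(cs_error(
        "You have to agree to the privacy policy first",
        "M_CONSENT_NOT_GIVEN",
        consent_uri="https://example.test/_matrix/consent?u=alice&h=abc123",
    ))
    # {'error': '...', 'errcode': 'M_CONSENT_NOT_GIVEN', 'consent_uri': '...'}
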
synapse/api/filtering.py
@@ -17,7 +17,7 @@ from synapse.storage.presence import UserPresenceState
 from synapse.types import UserID, RoomID
 from twisted.internet import defer

-import ujson as json
+import simplejson as json
 import jsonschema
 from jsonschema import FormatChecker

synapse/api/urls.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,6 +15,12 @@
 # limitations under the License.

 """Contains the URL paths to prefix various aspects of the server with. """
+from hashlib import sha256
+import hmac
+
+from six.moves.urllib.parse import urlencode
+
+from synapse.config import ConfigError

 CLIENT_PREFIX = "/_matrix/client/api/v1"
 CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
@@ -25,3 +32,46 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
+
+
+class ConsentURIBuilder(object):
+    def __init__(self, hs_config):
+        """
+        Args:
+            hs_config (synapse.config.homeserver.HomeServerConfig):
+        """
+        if hs_config.form_secret is None:
+            raise ConfigError(
+                "form_secret not set in config",
+            )
+        if hs_config.public_baseurl is None:
+            raise ConfigError(
+                "public_baseurl not set in config",
+            )
+
+        self._hmac_secret = hs_config.form_secret.encode("utf-8")
+        self._public_baseurl = hs_config.public_baseurl
+
+    def build_user_consent_uri(self, user_id):
+        """Build a URI which we can give to the user to do their privacy
+        policy consent
+
+        Args:
+            user_id (str): mxid or username of user
+
+        Returns
+            (str) the URI where the user can do consent
+        """
+        mac = hmac.new(
+            key=self._hmac_secret,
+            msg=user_id,
+            digestmod=sha256,
+        ).hexdigest()
+        consent_uri = "%s_matrix/consent?%s" % (
+            self._public_baseurl,
+            urlencode({
+                "u": user_id,
+                "h": mac
+            }),
+        )
+        return consent_uri
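
How the consent URI built above is signed: an HMAC-SHA256 of the user id, keyed with form_secret, hex-encoded into the "h" query parameter. A standalone sketch; the secret and base URL are made-up values, and note that on Python 3 the user id must be encoded to bytes before HMACing, which the diff above leaves implicit:

    import hmac
    from hashlib import sha256
    from six.moves.urllib.parse import urlencode

    form_secret = b"made-up-form-secret"          # hypothetical value
    public_baseurl = "https://example.test/"      # hypothetical value
    user_id = "@alice:example.test"

    mac = hmac.new(
        key=form_secret,
        msg=user_id.encode("ascii"),  # bytes required on Python 3
        digestmod=sha256,
    ).hexdigest()
    print("%s_matrix/consent?%s" % (public_baseurl, urlencode({"u": user_id, "h": mac})))
    # https://example.test/_matrix/consent?u=%40alice%3Aexample.test&h=<64 hex chars>
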

synapse/app/_base.py (new file, 178 lines)
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import logging
+import sys
+
+try:
+    import affinity
+except Exception:
+    affinity = None
+
+from daemonize import Daemonize
+from synapse.util import PreserveLoggingContext
+from synapse.util.rlimit import change_resource_limit
+from twisted.internet import error, reactor
+
+logger = logging.getLogger(__name__)
+
+
+def start_worker_reactor(appname, config):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor. Pulls configuration from the 'worker' settings in 'config'.
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        config (synapse.config.Config): config object
+    """
+
+    logger = logging.getLogger(config.worker_app)
+
+    start_reactor(
+        appname,
+        config.soft_file_limit,
+        config.gc_thresholds,
+        config.worker_pid_file,
+        config.worker_daemonize,
+        config.worker_cpu_affinity,
+        logger,
+    )
+
+
+def start_reactor(
+        appname,
+        soft_file_limit,
+        gc_thresholds,
+        pid_file,
+        daemonize,
+        cpu_affinity,
+        logger,
+):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        soft_file_limit (int):
+        gc_thresholds:
+        pid_file (str): name of pid file to write to if daemonize is True
+        daemonize (bool): true to run the reactor in a background process
+        cpu_affinity (int|None): cpu affinity mask
+        logger (logging.Logger): logger instance to pass to Daemonize
+    """
+
+    def run():
+        # make sure that we run the reactor with the sentinel log context,
+        # otherwise other PreserveLoggingContext instances will get confused
+        # and complain when they see the logcontext arbitrarily swapping
+        # between the sentinel and `run` logcontexts.
+        with PreserveLoggingContext():
+            logger.info("Running")
+            if cpu_affinity is not None:
+                if not affinity:
+                    quit_with_error(
+                        "Missing package 'affinity' required for cpu_affinity\n"
+                        "option\n\n"
+                        "Install by running:\n\n"
+                        "   pip install affinity\n\n"
+                    )
+                logger.info("Setting CPU affinity to %s" % cpu_affinity)
+                affinity.set_process_affinity_mask(0, cpu_affinity)
+            change_resource_limit(soft_file_limit)
+            if gc_thresholds:
+                gc.set_threshold(*gc_thresholds)
+            reactor.run()
+
+    if daemonize:
+        daemon = Daemonize(
+            app=appname,
+            pid=pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+def quit_with_error(error_string):
+    message_lines = error_string.split("\n")
+    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
+    sys.stderr.write("*" * line_length + '\n')
+    for line in message_lines:
+        sys.stderr.write(" %s\n" % (line.rstrip(),))
+    sys.stderr.write("*" * line_length + '\n')
+    sys.exit(1)
+
+
+def listen_tcp(bind_addresses, port, factory, backlog=50):
+    """
+    Create a TCP socket for a port and several addresses
+    """
+    for address in bind_addresses:
+        try:
+            reactor.listenTCP(
+                port,
+                factory,
+                backlog,
+                address
+            )
+        except error.CannotListenError as e:
+            check_bind_error(e, address, bind_addresses)
+
+
+def listen_ssl(bind_addresses, port, factory, context_factory, backlog=50):
+    """
+    Create an SSL socket for a port and several addresses
+    """
+    for address in bind_addresses:
+        try:
+            reactor.listenSSL(
+                port,
+                factory,
+                context_factory,
+                backlog,
+                address
+            )
+        except error.CannotListenError as e:
+            check_bind_error(e, address, bind_addresses)
+
+
+def check_bind_error(e, address, bind_addresses):
+    """
+    This method checks an exception occurred while binding on 0.0.0.0.
+    If :: is specified in the bind addresses a warning is shown.
+    The exception is still raised otherwise.
+
+    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
+    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
+    When binding on 0.0.0.0 after :: this can safely be ignored.
+
+    Args:
+        e (Exception): Exception that was caught.
+        address (str): Address on which binding was attempted.
+        bind_addresses (list): Addresses on which the service listens.
+    """
+    if address == '0.0.0.0' and '::' in bind_addresses:
+        logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
+    else:
+        raise e
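
The rule check_bind_error encodes is worth seeing in isolation: on Linux and macOS, binding "::" also claims the IPv4 wildcard, so a subsequent CannotListenError on "0.0.0.0" is expected and safe to swallow. A toy sketch of the same logic under an arbitrary, made-up port; sockets are bound as soon as listenTCP is called, so no reactor.run() is needed for the demonstration:

    from twisted.internet import error, reactor
    from twisted.web.resource import NoResource
    from twisted.web.server import Site

    bind_addresses = ["::", "0.0.0.0"]
    for address in bind_addresses:
        try:
            # same positional signature listen_tcp uses: port, factory, backlog, interface
            reactor.listenTCP(8448, Site(NoResource()), 50, address)
        except error.CannotListenError:
            if address == "0.0.0.0" and "::" in bind_addresses:
                print("0.0.0.0 already covered by [::]; continuing")
            else:
                raise
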

synapse/app/appservice.py
@@ -13,37 +13,30 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
-from synapse.server import HomeServer
+from synapse import events
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-
-from synapse import events
-
 from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.appservice")

@@ -56,19 +49,6 @@ class AppserviceSlaveStore(


 class AppserviceServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
@@ -84,19 +64,19 @@ class AppserviceServer(HomeServer):
                 if name == "metrics":
                     resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=address
-            )
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )

         logger.info("Synapse appservice now listening on port %d", port)

@@ -105,45 +85,42 @@ class AppserviceServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ASReplicationHandler(self)
+
+
+class ASReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(ASReplicationHandler, self).__init__(hs.get_datastore())
+        self.appservice_handler = hs.get_application_service_handler()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
+
+        if stream_name == "events":
+            max_stream_id = self.store.get_room_max_stream_ordering()
+            run_in_background(self._notify_app_services, max_stream_id)
+
     @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        appservice_handler = self.get_application_service_handler()
-
-        @defer.inlineCallbacks
-        def replicate(results):
-            stream = results.get("events")
-            if stream:
-                max_stream_id = stream["position"]
-                yield appservice_handler.notify_interested_services(max_stream_id)
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                replicate(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(30)
+    def _notify_app_services(self, room_stream_id):
+        try:
+            yield self.appservice_handler.notify_interested_services(room_stream_id)
+        except Exception:
+            logger.exception("Error notifying application services of event")


 def start(config_options):
@@ -186,37 +163,13 @@ def start(config_options):
     ps.setup()
     ps.start_listening(config.worker_listeners)

-    def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
-        ps.replicate()
         ps.get_datastore().start_profiling()
         ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-appservice",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-appservice", config)


 if __name__ == '__main__':
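
The structural shift in this worker is from pull to push: the while-True HTTP polling loop (with its sleep(30) back-off on error) is replaced by a ReplicationClientHandler subclass that reacts to on_rdata callbacks from the master's TCP replication stream. A stripped-down stub of that shape, using stand-in objects rather than Synapse's real base class:

    from twisted.internet import defer

    class StubReplicationHandler(object):
        """Mirrors ASReplicationHandler's shape; `store` and
        `appservice_handler` are hypothetical stand-ins."""
        def __init__(self, store, appservice_handler):
            self.store = store
            self.appservice_handler = appservice_handler

        def on_rdata(self, stream_name, token, rows):
            # Called by the replication connection for each batch of rows;
            # the appservice pusher only cares about the "events" stream.
            if stream_name == "events":
                max_stream_id = self.store.get_room_max_stream_ordering()
                self._notify_app_services(max_stream_id)

        @defer.inlineCallbacks
        def _notify_app_services(self, room_stream_id):
            try:
                yield self.appservice_handler.notify_interested_services(room_stream_id)
            except Exception:
                pass  # the real code logs with logger.exception instead
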

synapse/app/client_reader.py
@@ -13,46 +13,38 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.http.site import SynapseSite
+from synapse.crypto import context_factory
 from synapse.http.server import JsonResource
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.client.v1.room import PublicRoomListRestServlet
 from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.crypto import context_factory
-
-from synapse import events
-
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.internet import reactor
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.client_reader")

@@ -65,26 +57,13 @@ class ClientReaderSlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     TransactionStore,
+    SlavedClientIpStore,
     BaseSlavedStore,
-    ClientIpStore, # After BaseSlavedStore because the constructor is different
 ):
     pass


 class ClientReaderServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
@@ -109,19 +88,19 @@ class ClientReaderServer(HomeServer):
             "/_matrix/client/api/v1": resource,
         })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=address
-            )
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )

         logger.info("Synapse client reader now listening on port %d", port)

@@ -130,36 +109,23 @@ class ClientReaderServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())


 def start(config_options):
@@ -191,40 +157,15 @@ def start(config_options):
     )

     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)

-    def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
-        ss.replicate()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-client-reader",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-client-reader", config)


 if __name__ == '__main__':
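
A side note on the mixin reordering above (SlavedClientIpStore now sits before BaseSlavedStore, replacing the old trailing ClientIpStore): with Python's C3 MRO, earlier bases win attribute lookup, so a slaved store that overrides behaviour must precede the base store. A toy illustration with invented class names:

    class BaseSlaved(object):
        def lookup(self):
            return "base"

    class ClientIps(BaseSlaved):
        def lookup(self):
            return "client_ips override"

    # earlier bases take precedence in the MRO, as with the store above
    class Store(ClientIps, BaseSlaved):
        pass

    print(Store().lookup())                        # -> "client_ips override"
    print([c.__name__ for c in Store.__mro__])     # Store, ClientIps, BaseSlaved, object
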

synapse/app/event_creator.py (new file, 190 lines)
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import sys
+
+import synapse
+from synapse import events
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.http.server import JsonResource
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.profile import SlavedProfileStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.pushers import SlavedPusherStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.room import (
+    RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
+    JoinRoomAliasServlet,
+)
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
+
+logger = logging.getLogger("synapse.app.event_creator")
+
+
+class EventCreatorSlavedStore(
+    DirectoryStore,
+    TransactionStore,
+    SlavedProfileStore,
+    SlavedAccountDataStore,
+    SlavedPusherStore,
+    SlavedReceiptsStore,
+    SlavedPushRuleStore,
+    SlavedDeviceStore,
+    SlavedClientIpStore,
+    SlavedApplicationServiceStore,
+    SlavedEventStore,
+    SlavedRegistrationStore,
+    RoomStore,
+    BaseSlavedStore,
+):
+    pass
+
+
+class EventCreatorServer(HomeServer):
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    RoomSendEventRestServlet(self).register(resource)
+                    RoomMembershipRestServlet(self).register(resource)
+                    RoomStateEventRestServlet(self).register(resource)
+                    JoinRoomAliasServlet(self).register(resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )
+
+        logger.info("Synapse event creator now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse event creator", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.event_creator"
+
+    assert config.worker_replication_http_port is not None
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ss = EventCreatorServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ss.setup()
+    ss.start_listening(config.worker_listeners)
+
+    def start():
+        ss.get_state_handler().start_caching()
+        ss.get_datastore().start_profiling()
+
+    reactor.callWhenRunning(start)
+
+    _base.start_worker_reactor("synapse-event-creator", config)
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
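
Worth noting about the new worker above: it asserts that worker_replication_http_port is set, since the event-sending servlets it hosts defer the actual event persistence to the master over replication HTTP rather than writing events themselves. The resource map also points every client-API prefix at the one JsonResource, so all four URL generations reach the same four servlets; a trivial sketch of that mapping shape, with a placeholder standing in for JsonResource:

    resource = object()  # stand-in for JsonResource(self, canonical_json=False)
    resources = {
        "/_matrix/client/r0": resource,
        "/_matrix/client/unstable": resource,
        "/_matrix/client/v2_alpha": resource,
        "/_matrix/client/api/v1": resource,
    }
    # every prefix shares one servlet tree; anything else falls through to NoResource
    assert len(set(map(id, resources.values()))) == 1
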

synapse/app/federation_reader.py
@@ -13,43 +13,35 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
+from synapse.api.urls import FEDERATION_PREFIX
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import TransactionStore
-from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.api.urls import FEDERATION_PREFIX
-from synapse.federation.transport.server import TransportLayerServer
-from synapse.crypto import context_factory
-
-from synapse import events
-
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.internet import reactor
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.federation_reader")

@@ -66,19 +58,6 @@ class FederationReaderSlavedStore(


 class FederationReaderServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
@@ -98,19 +77,19 @@ class FederationReaderServer(HomeServer):
             FEDERATION_PREFIX: TransportLayerServer(self),
         })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=address
-            )
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )

         logger.info("Synapse federation reader now listening on port %d", port)

@@ -119,36 +98,22 @@ class FederationReaderServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())


 def start(config_options):
@@ -180,40 +145,15 @@ def start(config_options):
     )

     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)

-    def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
-        ss.replicate()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-federation-reader",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-federation-reader", config)


 if __name__ == '__main__':
|
|||||||
synapse/app/federation_sender.py
@@ -13,69 +13,69 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys

 import synapse
+from synapse import events
-from synapse.server import HomeServer
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.crypto import context_factory
-from synapse.http.site import SynapseSite
 from synapse.federation import send_queue
-from synapse.federation.units import Edu
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.presence import SlavedPresenceStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.transactions import TransactionStore
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.storage.presence import UserPresenceState
-from synapse.util.async import sleep
+from synapse.util.async import Linearizer
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource

-from synapse import events
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
-import ujson as json
-
-logger = logging.getLogger("synapse.app.appservice")
+logger = logging.getLogger("synapse.app.federation_sender")


 class FederationSenderSlaveStore(
     SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
-    SlavedRegistrationStore, SlavedDeviceStore,
+    SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
 ):
-    pass
+    def __init__(self, db_conn, hs):
+        super(FederationSenderSlaveStore, self).__init__(db_conn, hs)
+
+        # We pull out the current federation stream position now so that we
+        # always have a known value for the federation position in memory so
+        # that we don't have to bounce via a deferred once when we start the
+        # replication streams.
+        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
+
+    def _get_federation_out_pos(self, db_conn):
+        sql = (
+            "SELECT stream_id FROM federation_stream_position"
+            " WHERE type = ?"
+        )
+        sql = self.database_engine.convert_param_style(sql)
+
+        txn = db_conn.cursor()
+        txn.execute(sql, ("federation",))
+        rows = txn.fetchall()
+        txn.close()
+
+        return rows[0][0] if rows else -1


 class FederationSenderServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
@@ -91,19 +91,19 @@ class FederationSenderServer(HomeServer):
                 if name == "metrics":
                     resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=address
-            )
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )

         logger.info("Synapse federation_sender now listening on port %d", port)

@@ -112,41 +112,39 @@ class FederationSenderServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        send_handler = FederationSenderHandler(self)
-
-        send_handler.on_start()
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args.update((yield send_handler.stream_positions()))
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                yield send_handler.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(30)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return FederationSenderReplicationHandler(self)
+
+
+class FederationSenderReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
+        self.send_handler = FederationSenderHandler(hs, self)
+
+    def on_rdata(self, stream_name, token, rows):
+        super(FederationSenderReplicationHandler, self).on_rdata(
+            stream_name, token, rows
+        )
+        self.send_handler.process_replication_rows(stream_name, token, rows)
+
+    def get_streams_to_replicate(self):
+        args = super(FederationSenderReplicationHandler, self).get_streams_to_replicate()
+        args.update(self.send_handler.stream_positions())
+        return args


 def start(config_options):
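Note: every `for address in bind_addresses: reactor.listenTCP(...)` loop in these files collapses into a single `_base.listen_tcp(bind_addresses, port, factory)` call. A rough sketch of that helper, inferred from the call sites above rather than from the actual `synapse.app._base` source:

from twisted.internet import reactor


def listen_tcp(bind_addresses, port, factory, backlog=50):
    """Create a TCP listener for the factory on each of the given interfaces."""
    for address in bind_addresses:
        reactor.listenTCP(port, factory, backlog, interface=address)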
@@ -192,46 +190,27 @@ def start(config_options):
     ps.setup()
     ps.start_listening(config.worker_listeners)

-    def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
-        ps.replicate()
         ps.get_datastore().start_profiling()
         ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-federation-sender",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-federation-sender", config)


 class FederationSenderHandler(object):
     """Processes the replication stream and forwards the appropriate entries
     to the federation sender.
     """
-    def __init__(self, hs):
+    def __init__(self, hs, replication_client):
         self.store = hs.get_datastore()
         self.federation_sender = hs.get_federation_sender()
+        self.replication_client = replication_client
+
+        self.federation_position = self.store.federation_out_pos_startup
+        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
+
+        self._last_ack = self.federation_position

         self._room_serials = {}
         self._room_typing = {}
@@ -243,98 +222,38 @@ class FederationSenderHandler(object):
             self.store.get_room_max_stream_ordering()
         )

-    @defer.inlineCallbacks
     def stream_positions(self):
-        stream_id = yield self.store.get_federation_out_pos("federation")
-        defer.returnValue({
-            "federation": stream_id,
-
-            # Ack stuff we've "processed", this should only be called from
-            # one process.
-            "federation_ack": stream_id,
-        })
-
-    @defer.inlineCallbacks
-    def process_replication(self, result):
+        return {"federation": self.federation_position}
+
+    def process_replication_rows(self, stream_name, token, rows):
         # The federation stream contains things that we want to send out, e.g.
         # presence, typing, etc.
-        fed_stream = result.get("federation")
-        if fed_stream:
-            latest_id = int(fed_stream["position"])
-
-            # The federation stream containis a bunch of different types of
-            # rows that need to be handled differently. We parse the rows, put
-            # them into the appropriate collection and then send them off.
-            presence_to_send = {}
-            keyed_edus = {}
-            edus = {}
-            failures = {}
-            device_destinations = set()
-
-            # Parse the rows in the stream
-            for row in fed_stream["rows"]:
-                position, typ, content_js = row
-                content = json.loads(content_js)
-
-                if typ == send_queue.PRESENCE_TYPE:
-                    destination = content["destination"]
-                    state = UserPresenceState.from_dict(content["state"])
-
-                    presence_to_send.setdefault(destination, []).append(state)
-                elif typ == send_queue.KEYED_EDU_TYPE:
-                    key = content["key"]
-                    edu = Edu(**content["edu"])
-
-                    keyed_edus.setdefault(
-                        edu.destination, {}
-                    )[(edu.destination, tuple(key))] = edu
-                elif typ == send_queue.EDU_TYPE:
-                    edu = Edu(**content)
-
-                    edus.setdefault(edu.destination, []).append(edu)
-                elif typ == send_queue.FAILURE_TYPE:
-                    destination = content["destination"]
-                    failure = content["failure"]
-
-                    failures.setdefault(destination, []).append(failure)
-                elif typ == send_queue.DEVICE_MESSAGE_TYPE:
-                    device_destinations.add(content["destination"])
-                else:
-                    raise Exception("Unrecognised federation type: %r", typ)
-
-            # We've finished collecting, send everything off
-            for destination, states in presence_to_send.items():
-                self.federation_sender.send_presence(destination, states)
-
-            for destination, edu_map in keyed_edus.items():
-                for key, edu in edu_map.items():
-                    self.federation_sender.send_edu(
-                        edu.destination, edu.edu_type, edu.content, key=key,
-                    )
-
-            for destination, edu_list in edus.items():
-                for edu in edu_list:
-                    self.federation_sender.send_edu(
-                        edu.destination, edu.edu_type, edu.content, key=None,
-                    )
-
-            for destination, failure_list in failures.items():
-                for failure in failure_list:
-                    self.federation_sender.send_failure(destination, failure)
-
-            for destination in device_destinations:
-                self.federation_sender.send_device_messages(destination)
-
-            # Record where we are in the stream.
-            yield self.store.update_federation_out_pos(
-                "federation", latest_id
-            )
+        if stream_name == "federation":
+            send_queue.process_rows_for_federation(self.federation_sender, rows)
+            run_in_background(self.update_token, token)

         # We also need to poke the federation sender when new events happen
-        event_stream = result.get("events")
-        if event_stream:
-            latest_pos = event_stream["position"]
-            self.federation_sender.notify_new_events(latest_pos)
+        elif stream_name == "events":
+            self.federation_sender.notify_new_events(token)
+
+    @defer.inlineCallbacks
+    def update_token(self, token):
+        try:
+            self.federation_position = token
+
+            # We linearize here to ensure we don't have races updating the token
+            with (yield self._fed_position_linearizer.queue(None)):
+                if self._last_ack < self.federation_position:
+                    yield self.store.update_federation_out_pos(
+                        "federation", self.federation_position
+                    )
+
+                    # We ACK this token over replication so that the master can drop
+                    # its in memory queues
+                    self.replication_client.send_federation_ack(self.federation_position)
+                    self._last_ack = self.federation_position
+        except Exception:
+            logger.exception("Error updating federation stream position")


 if __name__ == '__main__':
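Note: `on_rdata` runs on the replication connection's hot path, so the new handler does the minimum inline and pushes the position persist and ack into the background via `run_in_background(self.update_token, token)`; the `Linearizer` then serialises overlapping `update_token` calls so the stored position and the ack can never race each other. The same pattern in isolation, with illustrative names and the same old-style `Linearizer` API the diff uses:

from twisted.internet import defer

from synapse.util.async import Linearizer

_linearizer = Linearizer(name="fed_pos")
_state = {"pos": -1, "acked": -1}


@defer.inlineCallbacks
def update_token(store, token):
    # Record the newest position immediately; persisting happens
    # one-at-a-time under the linearizer, so a slow write can never
    # be overtaken by an older one.
    _state["pos"] = max(_state["pos"], token)
    with (yield _linearizer.queue(None)):
        if _state["acked"] < _state["pos"]:
            yield store.update_federation_out_pos("federation", _state["pos"])
            _state["acked"] = _state["pos"]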
synapse/app/frontend_proxy.py (new file, 228 lines)
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import sys
+
+import synapse
+from synapse import events
+from synapse.api.errors import SynapseError
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.http.server import JsonResource
+from synapse.http.servlet import (
+    RestServlet, parse_json_object_from_request,
+)
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v2_alpha._base import client_v2_patterns
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
+
+logger = logging.getLogger("synapse.app.frontend_proxy")
+
+
+class KeyUploadServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(KeyUploadServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.http_client = hs.get_simple_http_client()
+        self.main_uri = hs.config.worker_main_http_uri
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, device_id):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+        user_id = requester.user.to_string()
+        body = parse_json_object_from_request(request)
+
+        if device_id is not None:
+            # passing the device_id here is deprecated; however, we allow it
+            # for now for compatibility with older clients.
+            if (requester.device_id is not None and
+                    device_id != requester.device_id):
+                logger.warning("Client uploading keys for a different device "
+                               "(logged in as %s, uploading for %s)",
+                               requester.device_id, device_id)
+        else:
+            device_id = requester.device_id
+
+        if device_id is None:
+            raise SynapseError(
+                400,
+                "To upload keys, you must pass device_id when authenticating"
+            )
+
+        if body:
+            # They're actually trying to upload something, proxy to main synapse.
+            # Pass through the auth headers, if any, in case the access token
+            # is there.
+            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
+            headers = {
+                "Authorization": auth_headers,
+            }
+            result = yield self.http_client.post_json_get_json(
+                self.main_uri + request.uri,
+                body,
+                headers=headers,
+            )
+
+            defer.returnValue((200, result))
+        else:
+            # Just interested in counts.
+            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
+            defer.returnValue((200, {"one_time_key_counts": result}))
+
+
+class FrontendProxySlavedStore(
+    SlavedDeviceStore,
+    SlavedClientIpStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    BaseSlavedStore,
+):
+    pass
+
+
+class FrontendProxyServer(HomeServer):
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    KeyUploadServlet(self).register(resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )
+
+        logger.info("Synapse client reader now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse frontend proxy", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.frontend_proxy"
+
+    assert config.worker_main_http_uri is not None
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ss = FrontendProxyServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ss.setup()
+    ss.start_listening(config.worker_listeners)
+
+    def start():
+        ss.get_state_handler().start_caching()
+        ss.get_datastore().start_profiling()
+
+    reactor.callWhenRunning(start)
+
+    _base.start_worker_reactor("synapse-frontend-proxy", config)
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
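Note: the servlet above answers empty `/keys/upload` polls locally (just the one-time-key counts, read from the slaved store) and only proxies real uploads to the main process via `worker_main_http_uri`. A hypothetical smoke test against such a worker; the host, port, and token below are placeholders, not values from the diff:

import requests  # assumed available; any HTTP client works

# An empty JSON body takes the local path: the worker answers with the
# one-time-key counts without ever contacting the main synapse process.
resp = requests.post(
    "http://localhost:8083/_matrix/client/r0/keys/upload",
    headers={"Authorization": "Bearer <ACCESS_TOKEN>"},
    json={},
)
print(resp.status_code, resp.json())
# expected shape: {"one_time_key_counts": {"signed_curve25519": 50}}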
synapse/app/homeserver.py
@@ -13,61 +13,53 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import synapse
-
 import gc
 import logging
 import os
 import sys

+import synapse
 import synapse.config.logger
+from synapse import events
+from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
+    LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
+    STATIC_PREFIX, WEB_CLIENT_PREFIX
+from synapse.app import _base
+from synapse.app._base import quit_with_error, listen_ssl, listen_tcp
 from synapse.config._base import ConfigError
-
-from synapse.python_dependencies import (
-    check_requirements, DEPENDENCY_LINKS
-)
-
-from synapse.rest import ClientRestResource
-from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
-from synapse.storage import are_all_users_on_domain
-from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
-
-from synapse.server import HomeServer
-
-from twisted.internet import reactor, task, defer
-from twisted.application import service
-from twisted.web.resource import Resource, EncodingResourceWrapper
-from twisted.web.static import File
-from twisted.web.server import GzipEncoderFactory
-from synapse.http.server import RootRedirect
-from synapse.rest.media.v0.content_repository import ContentRepoResource
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
-from synapse.rest.key.v2 import KeyApiV2Resource
-from synapse.api.urls import (
-    FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
-    SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
-    SERVER_KEY_V2_PREFIX,
-)
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
-from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
-from synapse.metrics import register_memory_metrics, get_metrics_for
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
 from synapse.federation.transport.server import TransportLayerServer
+from synapse.module_api import ModuleApi
+from synapse.http.additional_resource import AdditionalResource
+from synapse.http.server import RootRedirect
+from synapse.http.site import SynapseSite
+from synapse.metrics import register_memory_metrics
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
+    check_requirements
+from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX
+from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+from synapse.rest import ClientRestResource
+from synapse.rest.key.v1.server_key_resource import LocalKey
+from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.server import HomeServer
+from synapse.storage import are_all_users_on_domain
+from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
+from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
+from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.module_loader import load_module
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.http.site import SynapseSite
-from synapse import events
-
-from daemonize import Daemonize
+from twisted.application import service
+from twisted.internet import defer, reactor
+from twisted.web.resource import EncodingResourceWrapper, NoResource
+from twisted.web.server import GzipEncoderFactory
+from twisted.web.static import File

 logger = logging.getLogger("synapse.app.homeserver")

@@ -92,7 +84,7 @@ def build_resource_for_web_client(hs):
             "\n"
             "You can also disable hosting of the webclient via the\n"
             "configuration option `web_client`\n"
-            % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
+            % {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
         )
     syweb_path = os.path.dirname(syweb.__file__)
     webclient_path = os.path.join(syweb_path, "webclient")
@@ -119,90 +111,132 @@ class SynapseHomeServer(HomeServer):
         resources = {}
         for res in listener_config["resources"]:
             for name in res["names"]:
-                if name == "client":
-                    client_resource = ClientRestResource(self)
-                    if res["compress"]:
-                        client_resource = gz_wrap(client_resource)
-
-                    resources.update({
-                        "/_matrix/client/api/v1": client_resource,
-                        "/_matrix/client/r0": client_resource,
-                        "/_matrix/client/unstable": client_resource,
-                        "/_matrix/client/v2_alpha": client_resource,
-                        "/_matrix/client/versions": client_resource,
-                    })
-
-                if name == "federation":
-                    resources.update({
-                        FEDERATION_PREFIX: TransportLayerServer(self),
-                    })
-
-                if name in ["static", "client"]:
-                    resources.update({
-                        STATIC_PREFIX: File(
-                            os.path.join(os.path.dirname(synapse.__file__), "static")
-                        ),
-                    })
-
-                if name in ["media", "federation", "client"]:
-                    media_repo = MediaRepositoryResource(self)
-                    resources.update({
-                        MEDIA_PREFIX: media_repo,
-                        LEGACY_MEDIA_PREFIX: media_repo,
-                        CONTENT_REPO_PREFIX: ContentRepoResource(
-                            self, self.config.uploads_path
-                        ),
-                    })
-
-                if name in ["keys", "federation"]:
-                    resources.update({
-                        SERVER_KEY_PREFIX: LocalKey(self),
-                        SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
-                    })
-
-                if name == "webclient":
-                    resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
-
-                if name == "metrics" and self.get_config().enable_metrics:
-                    resources[METRICS_PREFIX] = MetricsResource(self)
-
-                if name == "replication":
-                    resources[REPLICATION_PREFIX] = ReplicationResource(self)
+                resources.update(self._configure_named_resource(
+                    name, res.get("compress", False),
+                ))
+
+        additional_resources = listener_config.get("additional_resources", {})
+        logger.debug("Configuring additional resources: %r",
+                     additional_resources)
+        module_api = ModuleApi(self, self.get_auth_handler())
+        for path, resmodule in additional_resources.items():
+            handler_cls, config = load_module(resmodule)
+            handler = handler_cls(config, module_api)
+            resources[path] = AdditionalResource(self, handler.handle_request)

         if WEB_CLIENT_PREFIX in resources:
             root_resource = RootRedirect(WEB_CLIENT_PREFIX)
         else:
-            root_resource = Resource()
+            root_resource = NoResource()

         root_resource = create_resource_tree(resources, root_resource)

         if tls:
-            for address in bind_addresses:
-                reactor.listenSSL(
-                    port,
-                    SynapseSite(
-                        "synapse.access.https.%s" % (site_tag,),
-                        site_tag,
-                        listener_config,
-                        root_resource,
-                    ),
-                    self.tls_server_context_factory,
-                    interface=address
-                )
+            listen_ssl(
+                bind_addresses,
+                port,
+                SynapseSite(
+                    "synapse.access.https.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                    self.version_string,
+                ),
+                self.tls_server_context_factory,
+            )
         else:
-            for address in bind_addresses:
-                reactor.listenTCP(
-                    port,
-                    SynapseSite(
-                        "synapse.access.http.%s" % (site_tag,),
-                        site_tag,
-                        listener_config,
-                        root_resource,
-                    ),
-                    interface=address
-                )
+            listen_tcp(
+                bind_addresses,
+                port,
+                SynapseSite(
+                    "synapse.access.http.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                    self.version_string,
+                )
+            )
         logger.info("Synapse now listening on port %d", port)

+    def _configure_named_resource(self, name, compress=False):
+        """Build a resource map for a named resource
+
+        Args:
+            name (str): named resource: one of "client", "federation", etc
+            compress (bool): whether to enable gzip compression for this
+                resource
+
+        Returns:
+            dict[str, Resource]: map from path to HTTP resource
+        """
+        resources = {}
+        if name == "client":
+            client_resource = ClientRestResource(self)
+            if compress:
+                client_resource = gz_wrap(client_resource)
+
+            resources.update({
+                "/_matrix/client/api/v1": client_resource,
+                "/_matrix/client/r0": client_resource,
+                "/_matrix/client/unstable": client_resource,
+                "/_matrix/client/v2_alpha": client_resource,
+                "/_matrix/client/versions": client_resource,
+            })
+
+        if name == "consent":
+            from synapse.rest.consent.consent_resource import ConsentResource
+            consent_resource = ConsentResource(self)
+            if compress:
+                consent_resource = gz_wrap(consent_resource)
+            resources.update({
+                "/_matrix/consent": consent_resource,
+            })
+
+        if name == "federation":
+            resources.update({
+                FEDERATION_PREFIX: TransportLayerServer(self),
+            })
+
+        if name in ["static", "client"]:
+            resources.update({
+                STATIC_PREFIX: File(
+                    os.path.join(os.path.dirname(synapse.__file__), "static")
+                ),
+            })
+
+        if name in ["media", "federation", "client"]:
+            if self.get_config().enable_media_repo:
+                media_repo = self.get_media_repository_resource()
+                resources.update({
+                    MEDIA_PREFIX: media_repo,
+                    LEGACY_MEDIA_PREFIX: media_repo,
+                    CONTENT_REPO_PREFIX: ContentRepoResource(
+                        self, self.config.uploads_path
+                    ),
+                })
+            elif name == "media":
+                raise ConfigError(
+                    "'media' resource conflicts with enable_media_repo=False",
+                )
+
+        if name in ["keys", "federation"]:
+            resources.update({
+                SERVER_KEY_PREFIX: LocalKey(self),
+                SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
+            })
+
+        if name == "webclient":
+            resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
+
+        if name == "metrics" and self.get_config().enable_metrics:
+            resources[METRICS_PREFIX] = MetricsResource(self)
+
+        if name == "replication":
+            resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
+
+        return resources
+
     def start_listening(self):
         config = self.get_config()

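Note: `_configure_named_resource` turns each name in a listener's `resources` block into a map from path prefix to Twisted resource, which `create_resource_tree` then mounts under one root. For reference, a listener config of the shape this code consumes, written as the Python dict the handlers actually see; the keys come from the diff, the values are illustrative only:

listener_config = {
    "port": 8008,
    "bind_addresses": ["::1", "127.0.0.1"],
    "type": "http",
    "tls": False,
    "resources": [
        {"names": ["client"], "compress": True},
        {"names": ["federation", "keys"], "compress": False},
    ],
}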
@@ -210,17 +244,24 @@ class SynapseHomeServer(HomeServer):
             if listener["type"] == "http":
                 self._listener_http(config, listener)
             elif listener["type"] == "manhole":
+                listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            elif listener["type"] == "replication":
                 bind_addresses = listener["bind_addresses"]

                 for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                    factory = ReplicationStreamProtocolFactory(self)
+                    server_listener = reactor.listenTCP(
+                        listener["port"], factory, interface=address
+                    )
+                    reactor.addSystemEventTrigger(
+                        "before", "shutdown", server_listener.stopListening,
+                    )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -241,29 +282,6 @@ class SynapseHomeServer(HomeServer):
         except IncorrectDatabaseSetup as e:
             quit_with_error(e.message)

-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
-
-def quit_with_error(error_string):
-    message_lines = error_string.split("\n")
-    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
-    sys.stderr.write("*" * line_length + '\n')
-    for line in message_lines:
-        sys.stderr.write(" %s\n" % (line.rstrip(),))
-    sys.stderr.write("*" * line_length + '\n')
-    sys.exit(1)
-

 def setup(config_options):
     """
@@ -342,7 +360,7 @@ def setup(config_options):
         hs.get_state_handler().start_caching()
         hs.get_datastore().start_profiling()
         hs.get_datastore().start_doing_background_updates()
-        hs.get_replication_layer().start_get_pdu_cache()
+        hs.get_federation_client().start_get_pdu_cache()

         register_memory_metrics(hs)

@@ -391,10 +409,15 @@ def run(hs):
         ThreadPool._worker = profile(ThreadPool._worker)
         reactor.run = profile(reactor.run)

-    start_time = hs.get_clock().time()
+    clock = hs.get_clock()
+    start_time = clock.time()

     stats = {}

+    # Contains the list of processes we will be monitoring
+    # currently either 0 or 1
+    stats_process = []
+
     @defer.inlineCallbacks
     def phone_stats_home():
         logger.info("Gathering stats for reporting")
@@ -403,41 +426,36 @@ def run(hs):
         if uptime < 0:
             uptime = 0

-        # If the stats directory is empty then this is the first time we've
-        # reported stats.
-        first_time = not stats
-
         stats["homeserver"] = hs.config.server_name
         stats["timestamp"] = now
         stats["uptime_seconds"] = uptime
         stats["total_users"] = yield hs.get_datastore().count_all_users()
+
+        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
+        stats["total_nonbridged_users"] = total_nonbridged_users
+
         room_count = yield hs.get_datastore().get_room_count()
         stats["total_room_count"] = room_count
+
         stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
-        daily_messages = yield hs.get_datastore().count_daily_messages()
-        if daily_messages is not None:
-            stats["daily_messages"] = daily_messages
-        else:
-            stats.pop("daily_messages", None)
-
-        if first_time:
-            # Add callbacks to report the synapse stats as metrics whenever
-            # prometheus requests them, typically every 30s.
-            # As some of the stats are expensive to calculate we only update
-            # them when synapse phones home to matrix.org every 24 hours.
-            metrics = get_metrics_for("synapse.usage")
-            metrics.add_callback("timestamp", lambda: stats["timestamp"])
-            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
-            metrics.add_callback("total_users", lambda: stats["total_users"])
-            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
-            metrics.add_callback(
-                "daily_active_users", lambda: stats["daily_active_users"]
-            )
-            metrics.add_callback(
-                "daily_messages", lambda: stats.get("daily_messages", 0)
-            )
+        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
+        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
+
+        r30_results = yield hs.get_datastore().count_r30_users()
+        for name, count in r30_results.iteritems():
+            stats["r30_users_" + name] = count
+
+        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+        stats["daily_sent_messages"] = daily_sent_messages
+        stats["cache_factor"] = CACHE_SIZE_FACTOR
+        stats["event_cache_size"] = hs.config.event_cache_size
+
+        if len(stats_process) > 0:
+            stats["memory_rss"] = 0
+            stats["cpu_average"] = 0
+            for process in stats_process:
+                stats["memory_rss"] += process.memory_info().rss
+                stats["cpu_average"] += int(process.cpu_percent(interval=None))

         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:
@@ -448,42 +466,56 @@ def run(hs):
         except Exception as e:
             logger.warn("Error reporting stats: %s", e)

+    def performance_stats_init():
+        try:
+            import psutil
+            process = psutil.Process()
+            # Ensure we can fetch both, and make the initial request for cpu_percent
+            # so the next request will use this as the initial point.
+            process.memory_info().rss
+            process.cpu_percent(interval=None)
+            logger.info("report_stats can use psutil")
+            stats_process.append(process)
+        except (ImportError, AttributeError):
+            logger.warn(
+                "report_stats enabled but psutil is not installed or incorrect version."
+                " Disabling reporting of memory/cpu stats."
+                " Ensuring psutil is available will help matrix.org track performance"
+                " changes across releases."
+            )
+
+    def generate_user_daily_visit_stats():
+        hs.get_datastore().generate_user_daily_visits()
+
+    # Rather than update on per session basis, batch up the requests.
+    # If you increase the loop period, the accuracy of user_daily_visits
+    # table will decrease
+    clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
+
     if hs.config.report_stats:
-        phone_home_task = task.LoopingCall(phone_stats_home)
-        logger.info("Scheduling stats reporting for 24 hour intervals")
-        phone_home_task.start(60 * 60 * 24, now=False)
-
-    def in_thread():
-        # Uncomment to enable tracing of log context changes.
-        # sys.settrace(logcontext_tracer)
-
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            change_resource_limit(hs.config.soft_file_limit)
-            if hs.config.gc_thresholds:
-                gc.set_threshold(*hs.config.gc_thresholds)
-            reactor.run()
-
-    if hs.config.daemonize:
-
-        if hs.config.print_pidfile:
-            print (hs.config.pid_file)
-
-        daemon = Daemonize(
-            app="synapse-homeserver",
-            pid=hs.config.pid_file,
-            action=lambda: in_thread(),
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-
-        daemon.start()
-    else:
-        in_thread()
+        logger.info("Scheduling stats reporting for 3 hour intervals")
+        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
+
+        # We need to defer this init for the cases that we daemonize
+        # otherwise the process ID we get is that of the non-daemon process
+        clock.call_later(0, performance_stats_init)
+
+        # We wait 5 minutes to send the first set of stats as the server can
+        # be quite busy the first few minutes
+        clock.call_later(5 * 60, phone_stats_home)
+
+    if hs.config.daemonize and hs.config.print_pidfile:
+        print (hs.config.pid_file)
+
+    _base.start_reactor(
+        "synapse-homeserver",
+        hs.config.soft_file_limit,
+        hs.config.gc_thresholds,
+        hs.config.pid_file,
+        hs.config.daemonize,
+        hs.config.cpu_affinity,
+        logger,
+    )


 def main():
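Note the mixed units in the new scheduling calls: the `3 * 60 * 60 * 1000` and `5 * 60 * 1000` literals suggest `clock.looping_call` takes its interval in milliseconds, while `clock.call_later(5 * 60, ...)` is plainly seconds. The equivalent schedule with bare Twisted primitives, for comparison (a sketch only; `phone_stats_home` here is a stand-in):

from twisted.internet import reactor, task


def phone_stats_home():
    pass  # gather and report the stats dict, as in the diff above


# every 3 hours; LoopingCall.start takes seconds, not milliseconds
task.LoopingCall(phone_stats_home).start(3 * 60 * 60, now=False)
# first report 5 minutes after startup, once the server has settled
reactor.callLater(5 * 60, phone_stats_home)
reactor.run()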
@@ -13,46 +13,37 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
import logging
|
||||||
|
import sys
|
||||||
|
|
||||||
import synapse
|
import synapse
|
||||||
|
from synapse import events
|
||||||
from synapse.config._base import ConfigError
|
|
||||||
from synapse.config.homeserver import HomeServerConfig
|
|
||||||
from synapse.config.logger import setup_logging
|
|
||||||
from synapse.http.site import SynapseSite
|
|
||||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
|
||||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
|
||||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
|
||||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
|
||||||
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
|
||||||
from synapse.server import HomeServer
|
|
||||||
from synapse.storage.client_ips import ClientIpStore
|
|
||||||
from synapse.storage.engines import create_engine
|
|
||||||
from synapse.storage.media_repository import MediaRepositoryStore
|
|
||||||
from synapse.util.async import sleep
|
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
|
||||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
|
||||||
from synapse.util.rlimit import change_resource_limit
|
|
||||||
from synapse.util.versionstring import get_version_string
|
|
||||||
from synapse.api.urls import (
|
from synapse.api.urls import (
|
||||||
CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
|
CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
|
||||||
)
|
)
|
||||||
|
from synapse.app import _base
|
||||||
|
from synapse.config._base import ConfigError
|
||||||
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.crypto import context_factory
|
from synapse.crypto import context_factory
|
||||||
|
from synapse.http.site import SynapseSite
|
||||||
from synapse import events
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
synapse/app/media_repository.py

-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-from daemonize import Daemonize
-import sys
-import logging
-import gc
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.storage.media_repository import MediaRepositoryStore
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.media_repository")
 
@@ -60,28 +51,15 @@ logger = logging.getLogger("synapse.app.media_repository")
 class MediaRepositorySlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
+    SlavedClientIpStore,
     TransactionStore,
     BaseSlavedStore,
     MediaRepositoryStore,
-    ClientIpStore,
 ):
     pass
 
 
 class MediaRepositoryServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
 
@@ -97,7 +75,7 @@ class MediaRepositoryServer(HomeServer):
             if name == "metrics":
                 resources[METRICS_PREFIX] = MetricsResource(self)
             elif name == "media":
-                media_repo = MediaRepositoryResource(self)
+                media_repo = self.get_media_repository_resource()
                 resources.update({
                     MEDIA_PREFIX: media_repo,
                     LEGACY_MEDIA_PREFIX: media_repo,
 
@@ -106,19 +84,19 @@ class MediaRepositoryServer(HomeServer):
                     ),
                 })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
-        for address in bind_addresses:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=address
-            )
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )
 
         logger.info("Synapse media repository now listening on port %d", port)
 
@@ -127,36 +105,22 @@ class MediaRepositoryServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])
 
-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())
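The hunk above replaces the old HTTP long-poll loop (a `replicate()` method hitting `worker_replication_url` every 30 seconds) with a persistent TCP replication client. A minimal sketch of the resulting pattern, assuming only the `ReplicationClientHandler` API visible in these diffs (`on_rdata(stream_name, token, rows)`, with the superclass applying rows to the slaved store); `LoggingReplicationHandler` is a hypothetical name used for illustration:

    import logging

    from synapse.replication.tcp.client import ReplicationClientHandler

    logger = logging.getLogger(__name__)


    class LoggingReplicationHandler(ReplicationClientHandler):
        """Hypothetical handler: logs each replication batch before the
        superclass applies it to the slaved datastore's caches."""

        def on_rdata(self, stream_name, token, rows):
            # rows are namedtuple-like objects; token is the stream position
            # this batch advances us to.
            super(LoggingReplicationHandler, self).on_rdata(stream_name, token, rows)
            logger.info("%d rows on %r, now at token %s", len(rows), stream_name, token)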
 def start(config_options):
 
@@ -170,6 +134,13 @@ def start(config_options):
 
     assert config.worker_app == "synapse.app.media_repository"
 
+    if config.enable_media_repo:
+        _base.quit_with_error(
+            "enable_media_repo must be disabled in the main synapse process\n"
+            "before the media repo can be run in a separate worker.\n"
+            "Please add ``enable_media_repo: false`` to the main config\n"
+        )
+
     setup_logging(config, use_worker_options=True)
 
     events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
@@ -188,40 +159,15 @@ def start(config_options):
     )
 
     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)
 
-    def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
-        ss.replicate()
 
     reactor.callWhenRunning(start)
 
-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-media-repository",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-media-repository", config)
 
 
 if __name__ == '__main__':
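Each worker's hand-rolled daemonize/run block (deleted above, and again in the workers below) is replaced by a single call to `_base.start_worker_reactor`. A sketch of what that shared helper plausibly centralises, reconstructed from the deleted lines rather than from `synapse/app/_base.py` itself, so treat the exact signature as an assumption:

    import gc
    import logging

    from daemonize import Daemonize
    from twisted.internet import reactor

    from synapse.util.logcontext import PreserveLoggingContext
    from synapse.util.rlimit import change_resource_limit

    logger = logging.getLogger(__name__)


    def start_worker_reactor(appname, config):
        """Sketch (assumption): the per-worker boilerplate, factored out."""
        def run():
            # Run the reactor under the sentinel log context so other
            # PreserveLoggingContext instances don't get confused.
            with PreserveLoggingContext():
                logger.info("Running")
                change_resource_limit(config.soft_file_limit)
                if config.gc_thresholds:
                    gc.set_threshold(*config.gc_thresholds)
                reactor.run()

        if config.worker_daemonize:
            Daemonize(
                app=appname,
                pid=config.worker_pid_file,
                action=run,
                auto_close_fds=False,
                verbose=True,
                logger=logger,
            ).start()
        else:
            run()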
synapse/app/pusher.py

@@ -13,40 +13,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
 
 import synapse
-from synapse.server import HomeServer
+from synapse import events
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.storage.roommember import RoomMemberStore
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.pushers import SlavedPusherStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.storage.engines import create_engine
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.util.async import sleep
+from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn, \
-    PreserveLoggingContext
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse import events
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
 
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
 
 logger = logging.getLogger("synapse.app.pusher")
@@ -83,42 +74,15 @@ class PusherSlaveStore(
         DataStore.get_profile_displayname.__func__
     )
 
-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
-    )
-
 
 class PusherServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = PusherSlaveStore(self.get_db_conn(), self)
         logger.info("Finished setting up.")
 
     def remove_pusher(self, app_id, push_key, user_id):
-        http_client = self.get_simple_http_client()
-        replication_url = self.config.worker_replication_url
-        url = replication_url + "/remove_pushers"
-        return http_client.post_json_get_json(url, {
-            "remove": [{
-                "app_id": app_id,
-                "push_key": push_key,
-                "user_id": user_id,
-            }]
-        })
+        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
 
@@ -130,19 +94,19 @@ class PusherServer(HomeServer):
             if name == "metrics":
                 resources[METRICS_PREFIX] = MetricsResource(self)
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
-        for address in bind_addresses:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=address
-            )
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )
 
         logger.info("Synapse pusher now listening on port %d", port)
 
@@ -151,88 +115,67 @@ class PusherServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])
 
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return PusherReplicationHandler(self)
+
+
+class PusherReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(PusherReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.pusher_pool = hs.get_pusherpool()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
+        run_in_background(self.poke_pushers, stream_name, token, rows)
+
     @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        pusher_pool = self.get_pusherpool()
-
-        def stop_pusher(user_id, app_id, pushkey):
-            key = "%s:%s" % (app_id, pushkey)
-            pushers_for_user = pusher_pool.pushers.get(user_id, {})
-            pusher = pushers_for_user.pop(key, None)
-            if pusher is None:
-                return
-            logger.info("Stopping pusher %r / %r", user_id, key)
-            pusher.on_stop()
-
-        def start_pusher(user_id, app_id, pushkey):
-            key = "%s:%s" % (app_id, pushkey)
-            logger.info("Starting pusher %r / %r", user_id, key)
-            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)
-
-        @defer.inlineCallbacks
-        def poke_pushers(results):
-            pushers_rows = set(
-                map(tuple, results.get("pushers", {}).get("rows", []))
-            )
-            deleted_pushers_rows = set(
-                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
-            )
-            for row in sorted(pushers_rows | deleted_pushers_rows):
-                if row in deleted_pushers_rows:
-                    user_id, app_id, pushkey = row[1:4]
-                    stop_pusher(user_id, app_id, pushkey)
-                elif row in pushers_rows:
-                    user_id = row[1]
-                    app_id = row[5]
-                    pushkey = row[8]
-                    yield start_pusher(user_id, app_id, pushkey)
-
-            stream = results.get("events")
-            if stream and stream["rows"]:
-                min_stream_id = stream["rows"][0][0]
-                max_stream_id = stream["position"]
-                preserve_fn(pusher_pool.on_new_notifications)(
-                    min_stream_id, max_stream_id
-                )
-
-            stream = results.get("receipts")
-            if stream and stream["rows"]:
-                rows = stream["rows"]
-                affected_room_ids = set(row[1] for row in rows)
-                min_stream_id = rows[0][0]
-                max_stream_id = stream["position"]
-                preserve_fn(pusher_pool.on_new_receipts)(
-                    min_stream_id, max_stream_id, affected_room_ids
-                )
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                poke_pushers(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(30)
+    def poke_pushers(self, stream_name, token, rows):
+        try:
+            if stream_name == "pushers":
+                for row in rows:
+                    if row.deleted:
+                        yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
+                    else:
+                        yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
+            elif stream_name == "events":
+                yield self.pusher_pool.on_new_notifications(
+                    token, token,
+                )
+            elif stream_name == "receipts":
+                yield self.pusher_pool.on_new_receipts(
+                    token, token, set(row.room_id for row in rows)
+                )
+        except Exception:
+            logger.exception("Error poking pushers")
+
+    def stop_pusher(self, user_id, app_id, pushkey):
+        key = "%s:%s" % (app_id, pushkey)
+        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
+        pusher = pushers_for_user.pop(key, None)
+        if pusher is None:
+            return
+        logger.info("Stopping pusher %r / %r", user_id, key)
+        pusher.on_stop()
+
    def start_pusher(self, user_id, app_id, pushkey):
        key = "%s:%s" % (app_id, pushkey)
        logger.info("Starting pusher %r / %r", user_id, key)
        return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)
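`poke_pushers` now receives parsed stream rows instead of raw result dicts, so fields are read by attribute (`row.deleted`, `row.pushkey`) rather than by positional index (`row[1]`, `row[8]`). A sketch of the row shape this code assumes; the real row classes are defined alongside the TCP replication streams, and `PushersStreamRow` here is an illustrative stand-in:

    from collections import namedtuple

    # Illustrative only: a namedtuple with the attributes poke_pushers reads.
    PushersStreamRow = namedtuple(
        "PushersStreamRow", ("user_id", "app_id", "pushkey", "deleted")
    )

    row = PushersStreamRow("@alice:example.com", "com.example.app", "abc123", False)
    assert not row.deleted  # so the handler would call start_pusher(row.user_id, ...)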
 def start(config_options):
 
@@ -275,38 +218,14 @@ def start(config_options):
     ps.setup()
     ps.start_listening(config.worker_listeners)
 
-    def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
-        ps.replicate()
         ps.get_pusherpool().start()
         ps.get_datastore().start_profiling()
         ps.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
 
-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-pusher",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-pusher", config)
 
 
 if __name__ == '__main__':
synapse/app/synchrotron.py

@@ -13,105 +13,87 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import contextlib
+import logging
+import sys
 
 import synapse
-from synapse.api.constants import EventTypes, PresenceState
+from synapse.api.constants import EventTypes
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.handlers.presence import PresenceHandler
-from synapse.http.site import SynapseSite
+from synapse.handlers.presence import PresenceHandler, get_interested_parties
 from synapse.http.server import JsonResource
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.rest.client.v2_alpha import sync
-from synapse.rest.client.v1 import events
-from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
-from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.filtering import SlavedFilteringStore
+from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1 import events
+from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
+from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
+from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
-from synapse.storage.presence import PresenceStore, UserPresenceState
+from synapse.storage.presence import UserPresenceState
 from synapse.storage.roommember import RoomMemberStore
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn, \
-    PreserveLoggingContext
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
 
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import contextlib
-import gc
-import ujson as json
+from six import iteritems
 
 logger = logging.getLogger("synapse.app.synchrotron")
 class SynchrotronSlavedStore(
-    SlavedPushRuleStore,
-    SlavedEventStore,
     SlavedReceiptsStore,
     SlavedAccountDataStore,
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedFilteringStore,
     SlavedPresenceStore,
+    SlavedGroupServerStore,
     SlavedDeviceInboxStore,
     SlavedDeviceStore,
+    SlavedPushRuleStore,
+    SlavedEventStore,
+    SlavedClientIpStore,
     RoomStore,
     BaseSlavedStore,
-    ClientIpStore,  # After BaseSlavedStore because the constructor is different
 ):
-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
-    )
-
     did_forget = (
         RoomMemberStore.__dict__["did_forget"]
     )
 
-    # XXX: This is a bit broken because we don't persist the accepted list in a
-    # way that can be replicated. This means that we don't have a way to
-    # invalidate the cache correctly.
-    get_presence_list_accepted = PresenceStore.__dict__[
-        "get_presence_list_accepted"
-    ]
-    get_presence_list_observers_accepted = PresenceStore.__dict__[
-        "get_presence_list_observers_accepted"
-    ]
-
 
 UPDATE_SYNCING_USERS_MS = 10 * 1000
 
 
 class SynchrotronPresence(object):
     def __init__(self, hs):
+        self.hs = hs
         self.is_mine_id = hs.is_mine_id
         self.http_client = hs.get_simple_http_client()
         self.store = hs.get_datastore()
         self.user_to_num_current_syncs = {}
-        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()
@@ -121,17 +103,52 @@ class SynchrotronPresence(object):
             for state in active_presence
         }
 
+        # user_id -> last_sync_ms. Lists the users that have stopped syncing
+        # but we haven't notified the master of that yet
+        self.users_going_offline = {}
+
+        self._send_stop_syncing_loop = self.clock.looping_call(
+            self.send_stop_syncing, 10 * 1000
+        )
+
         self.process_id = random_string(16)
         logger.info("Presence process_id is %r", self.process_id)
 
-        self._sending_sync = False
-        self._need_to_send_sync = False
-        self.clock.looping_call(
-            self._send_syncing_users_regularly,
-            UPDATE_SYNCING_USERS_MS,
-        )
-
-        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)
+
+    def mark_as_coming_online(self, user_id):
+        """A user has started syncing. Send a UserSync to the master, unless they
+        had recently stopped syncing.
+
+        Args:
+            user_id (str)
+        """
+        going_offline = self.users_going_offline.pop(user_id, None)
+        if not going_offline:
+            # Safe to skip because we haven't yet told the master they were offline
+            self.send_user_sync(user_id, True, self.clock.time_msec())
+
+    def mark_as_going_offline(self, user_id):
+        """A user has stopped syncing. We wait before notifying the master as
+        its likely they'll come back soon. This allows us to avoid sending
+        a stopped syncing immediately followed by a started syncing notification
+        to the master
+
+        Args:
+            user_id (str)
+        """
+        self.users_going_offline[user_id] = self.clock.time_msec()
+
+    def send_stop_syncing(self):
+        """Check if there are any users who have stopped syncing a while ago
+        and haven't come back yet. If there are poke the master about them.
+        """
+        now = self.clock.time_msec()
+        for user_id, last_sync_ms in self.users_going_offline.items():
+            if now - last_sync_ms > 10 * 1000:
+                self.users_going_offline.pop(user_id, None)
+                self.send_user_sync(user_id, False, last_sync_ms)
 
     def set_state(self, user, state, ignore_status_msg=False):
         # TODO Hows this supposed to work?
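`mark_as_going_offline` deliberately delays the "stopped syncing" notification: if the user starts a new sync within ten seconds, `mark_as_coming_online` pops the pending entry and nothing is sent, so the master never sees a stop/start flap. A sketch of that transition, using a stub clock (hypothetical, for illustration only):

    class StubClock(object):
        """Hypothetical stand-in for hs.get_clock() in a test."""
        def __init__(self):
            self.now = 0

        def time_msec(self):
            return self.now

    # users_going_offline maps user_id -> last_sync_ms, exactly as in __init__.
    users_going_offline = {}
    clock = StubClock()

    users_going_offline["@u:example.com"] = clock.time_msec()  # mark_as_going_offline
    clock.now += 5 * 1000
    # The user comes back within 10s: pop() finds the entry, so no UserSync
    # "offline" message is ever sent to the master.
    assert users_going_offline.pop("@u:example.com", None) is not None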
@@ -139,18 +156,16 @@ class SynchrotronPresence(object):
 
     get_states = PresenceHandler.get_states.__func__
     get_state = PresenceHandler.get_state.__func__
-    _get_interested_parties = PresenceHandler._get_interested_parties.__func__
     current_state_for_users = PresenceHandler.current_state_for_users.__func__
 
-    @defer.inlineCallbacks
     def user_syncing(self, user_id, affect_presence):
         if affect_presence:
             curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
             self.user_to_num_current_syncs[user_id] = curr_sync + 1
-            prev_states = yield self.current_state_for_users([user_id])
-            if prev_states[user_id].state == PresenceState.OFFLINE:
-                # TODO: Don't block the sync request on this HTTP hit.
-                yield self._send_syncing_users_now()
+            # If we went from no in flight sync to some, notify replication
+            if self.user_to_num_current_syncs[user_id] == 1:
+                self.mark_as_coming_online(user_id)
 
         def _end():
             # We check that the user_id is in user_to_num_current_syncs because
 
@@ -159,6 +174,10 @@ class SynchrotronPresence(object):
             if affect_presence and user_id in self.user_to_num_current_syncs:
                 self.user_to_num_current_syncs[user_id] -= 1
 
+                # If we went from one in flight sync to non, notify replication
+                if self.user_to_num_current_syncs[user_id] == 0:
+                    self.mark_as_going_offline(user_id)
+
         @contextlib.contextmanager
         def _user_syncing():
             try:
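The replication traffic is driven by the edges of the per-user sync counter: the first concurrent sync (count 0 to 1) triggers `mark_as_coming_online`, and the last one ending (count 1 to 0) triggers `mark_as_going_offline`; increments and decrements in between send nothing. A compact restatement of the counting logic from the two hunks above:

    user_to_num_current_syncs = {}

    def sync_started(user_id):
        count = user_to_num_current_syncs.get(user_id, 0) + 1
        user_to_num_current_syncs[user_id] = count
        if count == 1:
            print("notify master: %s syncing" % (user_id,))   # mark_as_coming_online

    def sync_ended(user_id):
        user_to_num_current_syncs[user_id] -= 1
        if user_to_num_current_syncs[user_id] == 0:
            print("schedule offline notice for %s" % (user_id,))  # mark_as_going_offline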
@@ -166,56 +185,12 @@ class SynchrotronPresence(object):
             finally:
                 _end()
 
-        defer.returnValue(_user_syncing())
+        return defer.succeed(_user_syncing())
 
-    @defer.inlineCallbacks
-    def _on_shutdown(self):
-        # When the synchrotron is shutdown tell the master to clear the in
-        # progress syncs for this process
-        self.user_to_num_current_syncs.clear()
-        yield self._send_syncing_users_now()
-
-    def _send_syncing_users_regularly(self):
-        # Only send an update if we aren't in the middle of sending one.
-        if not self._sending_sync:
-            preserve_fn(self._send_syncing_users_now)()
-
-    @defer.inlineCallbacks
-    def _send_syncing_users_now(self):
-        if self._sending_sync:
-            # We don't want to race with sending another update.
-            # Instead we wait for that update to finish and send another
-            # update afterwards.
-            self._need_to_send_sync = True
-            return
-
-        # Flag that we are sending an update.
-        self._sending_sync = True
-
-        yield self.http_client.post_json_get_json(self.syncing_users_url, {
-            "process_id": self.process_id,
-            "syncing_users": [
-                user_id for user_id, count in self.user_to_num_current_syncs.items()
-                if count > 0
-            ],
-        })
-
-        # Unset the flag as we are no longer sending an update.
-        self._sending_sync = False
-        if self._need_to_send_sync:
-            # If something happened while we were sending the update then
-            # we might need to send another update.
-            # TODO: Check if the update that was sent matches the current state
-            # as we only need to send an update if they are different.
-            self._need_to_send_sync = False
-            yield self._send_syncing_users_now()
-
     @defer.inlineCallbacks
     def notify_from_replication(self, states, stream_id):
-        parties = yield self._get_interested_parties(
-            states, calculate_remote_hosts=False
-        )
-        room_ids_to_states, users_to_states, _ = parties
+        parties = yield get_interested_parties(self.store, states)
+        room_ids_to_states, users_to_states = parties
 
         self.notifier.on_new_event(
             "presence_key", stream_id, rooms=room_ids_to_states.keys(),
 
@@ -223,26 +198,24 @@ class SynchrotronPresence(object):
         )
 
     @defer.inlineCallbacks
-    def process_replication(self, result):
-        stream = result.get("presence", {"rows": []})
-        states = []
-        for row in stream["rows"]:
-            (
-                position, user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            ) = row
-            state = UserPresenceState(
-                user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            )
-            self.user_to_current_state[user_id] = state
-            states.append(state)
-
-        if states and "position" in stream:
-            stream_id = int(stream["position"])
-            yield self.notify_from_replication(states, stream_id)
+    def process_replication_rows(self, token, rows):
+        states = [UserPresenceState(
+            row.user_id, row.state, row.last_active_ts,
+            row.last_federation_update_ts, row.last_user_sync_ts, row.status_msg,
+            row.currently_active
+        ) for row in rows]
+
+        for state in states:
+            self.user_to_current_state[row.user_id] = state
+
+        stream_id = token
+        yield self.notify_from_replication(states, stream_id)
+
+    def get_currently_syncing_users(self):
+        return [
+            user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
+            if count > 0
+        ]
 
 
 class SynchrotronTyping(object):
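As with the pusher, presence replication now hands the handler typed rows plus a single `token` rather than a JSON result with embedded positions. One detail worth flagging in the new loop, if the extraction above is faithful to the branch: `self.user_to_current_state[row.user_id] = state` reads `row` as the leaked list-comprehension variable (Python 2 behaviour), i.e. the last row of the batch, so keying by `state.user_id` would express the intent more robustly. A sketch of the assumed row shape; `PresenceStreamRow` is an illustrative name, and the field order matches `UserPresenceState`:

    from collections import namedtuple

    # Illustrative stand-in for the presence stream's row type.
    PresenceStreamRow = namedtuple("PresenceStreamRow", (
        "user_id", "state", "last_active_ts", "last_federation_update_ts",
        "last_user_sync_ts", "status_msg", "currently_active",
    ))

    rows = [PresenceStreamRow("@a:example.com", "online", 0, 0, 0, None, True)]
    # Because the field orders line up, the conversion above is equivalent to:
    # states = [UserPresenceState(*row) for row in rows]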
@@ -257,16 +230,12 @@ class SynchrotronTyping(object):
         # value which we *must* use for the next replication request.
         return {"typing": self._latest_room_serial}
 
-    def process_replication(self, result):
-        stream = result.get("typing")
-        if stream:
-            self._latest_room_serial = int(stream["position"])
-
-            for row in stream["rows"]:
-                position, room_id, typing_json = row
-                typing = json.loads(typing_json)
-                self._room_serials[room_id] = position
-                self._room_typing[room_id] = typing
+    def process_replication_rows(self, token, rows):
+        self._latest_room_serial = token
+
+        for row in rows:
+            self._room_serials[row.room_id] = token
+            self._room_typing[row.room_id] = row.user_ids
 
 
 class SynchrotronApplicationService(object):
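The typing handler gets the same conversion: instead of decoding `typing_json` out of a result dict, it reads `row.room_id` and `row.user_ids` directly and takes the stream position from `token`, so the `ujson` import can go. Sketch of the assumed row shape (illustrative name):

    from collections import namedtuple

    TypingStreamRow = namedtuple("TypingStreamRow", ("room_id", "user_ids"))

    row = TypingStreamRow("!room:example.com", ["@a:example.com", "@b:example.com"])
    # process_replication_rows(token, [row]) then records:
    #   _room_serials[row.room_id] = token
    #   _room_typing[row.room_id] = row.user_ids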
@@ -275,19 +244,6 @@ class SynchrotronApplicationService(object):
 
 
 class SynchrotronServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
 
@@ -315,19 +271,19 @@ class SynchrotronServer(HomeServer):
                     "/_matrix/client/api/v1": resource,
                 })
 
-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())
 
-        for address in bind_addresses:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=address
-            )
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )
 
         logger.info("Synapse synchrotron now listening on port %d", port)
 
@@ -336,133 +292,22 @@ class SynchrotronServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                bind_addresses = listener["bind_addresses"]
-
-                for address in bind_addresses:
-                    reactor.listenTCP(
-                        listener["port"],
-                        manhole(
-                            username="matrix",
-                            password="rabbithole",
-                            globals={"hs": self},
-                        ),
-                        interface=address
-                    )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])
 
-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        notifier = self.get_notifier()
-        presence_handler = self.get_presence_handler()
-        typing_handler = self.get_typing_handler()
-
-        def notify_from_stream(
-            result, stream_name, stream_key, room=None, user=None
-        ):
-            stream = result.get(stream_name)
-            if stream:
-                position_index = stream["field_names"].index("position")
-                if room:
-                    room_index = stream["field_names"].index(room)
-                if user:
-                    user_index = stream["field_names"].index(user)
-
-                users = ()
-                rooms = ()
-                for row in stream["rows"]:
-                    position = row[position_index]
-
-                    if user:
-                        users = (row[user_index],)
-
-                    if room:
-                        rooms = (row[room_index],)
-
-                    notifier.on_new_event(
-                        stream_key, position, users=users, rooms=rooms
-                    )
-
-        @defer.inlineCallbacks
-        def notify_device_list_update(result):
-            stream = result.get("device_lists")
-            if not stream:
-                return
-
-            position_index = stream["field_names"].index("position")
-            user_index = stream["field_names"].index("user_id")
-
-            for row in stream["rows"]:
-                position = row[position_index]
-                user_id = row[user_index]
-
-                room_ids = yield store.get_rooms_for_user(user_id)
-
-                notifier.on_new_event(
-                    "device_list_key", position, rooms=room_ids,
-                )
-
-        @defer.inlineCallbacks
-        def notify(result):
-            stream = result.get("events")
-            if stream:
-                max_position = stream["position"]
-
-                event_map = yield store.get_events([row[1] for row in stream["rows"]])
-
-                for row in stream["rows"]:
-                    position = row[0]
-                    event_id = row[1]
-                    event = event_map.get(event_id, None)
-                    if not event:
-                        continue
-
-                    extra_users = ()
-                    if event.type == EventTypes.Member:
-                        extra_users = (event.state_key,)
-                    notifier.on_new_room_event(
-                        event, position, max_position, extra_users
-                    )
-
-            notify_from_stream(
-                result, "push_rules", "push_rules_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "user_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "room_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "tag_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "receipts", "receipt_key", room="room_id"
-            )
-            notify_from_stream(
-                result, "typing", "typing_key", room="room_id"
-            )
-            notify_from_stream(
-                result, "to_device", "to_device_key", user="user_id"
-            )
-            yield notify_device_list_update(result)
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args.update(typing_handler.stream_positions())
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                typing_handler.process_replication(result)
-                yield presence_handler.process_replication(result)
-                yield notify(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return SyncReplicationHandler(self)
 
     def build_presence_handler(self):
         return SynchrotronPresence(self)
@@ -471,6 +316,84 @@ class SynchrotronServer(HomeServer):
         return SynchrotronTyping(self)
 
 
+class SyncReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(SyncReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.store = hs.get_datastore()
+        self.typing_handler = hs.get_typing_handler()
+        # NB this is a SynchrotronPresence, not a normal PresenceHandler
+        self.presence_handler = hs.get_presence_handler()
+        self.notifier = hs.get_notifier()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
+        run_in_background(self.process_and_notify, stream_name, token, rows)
+
+    def get_streams_to_replicate(self):
+        args = super(SyncReplicationHandler, self).get_streams_to_replicate()
+        args.update(self.typing_handler.stream_positions())
+        return args
+
+    def get_currently_syncing_users(self):
+        return self.presence_handler.get_currently_syncing_users()
+
+    @defer.inlineCallbacks
+    def process_and_notify(self, stream_name, token, rows):
+        try:
+            if stream_name == "events":
+                # We shouldn't get multiple rows per token for events stream, so
+                # we don't need to optimise this for multiple rows.
+                for row in rows:
+                    event = yield self.store.get_event(row.event_id)
+                    extra_users = ()
+                    if event.type == EventTypes.Member:
+                        extra_users = (event.state_key,)
+                    max_token = self.store.get_room_max_stream_ordering()
+                    self.notifier.on_new_room_event(
+                        event, token, max_token, extra_users
+                    )
+            elif stream_name == "push_rules":
+                self.notifier.on_new_event(
+                    "push_rules_key", token, users=[row.user_id for row in rows],
+                )
+            elif stream_name in ("account_data", "tag_account_data",):
+                self.notifier.on_new_event(
+                    "account_data_key", token, users=[row.user_id for row in rows],
+                )
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "receipt_key", token, rooms=[row.room_id for row in rows],
+                )
+            elif stream_name == "typing":
+                self.typing_handler.process_replication_rows(token, rows)
+                self.notifier.on_new_event(
+                    "typing_key", token, rooms=[row.room_id for row in rows],
+                )
+            elif stream_name == "to_device":
+                entities = [row.entity for row in rows if row.entity.startswith("@")]
+                if entities:
+                    self.notifier.on_new_event(
+                        "to_device_key", token, users=entities,
+                    )
+            elif stream_name == "device_lists":
+                all_room_ids = set()
+                for row in rows:
+                    room_ids = yield self.store.get_rooms_for_user(row.user_id)
+                    all_room_ids.update(room_ids)
+                self.notifier.on_new_event(
+                    "device_list_key", token, rooms=all_room_ids,
+                )
+            elif stream_name == "presence":
+                yield self.presence_handler.process_replication_rows(token, rows)
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "groups_key", token, users=[row.user_id for row in rows],
+                )
+        except Exception:
+            logger.exception("Error processing replication")
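`SyncReplicationHandler.process_and_notify` is a straight dispatch on `stream_name`, mapping each replication stream to the notifier key the old `notify_from_stream` helper used to derive from `field_names`. Note the final branch is guarded by `stream_name == "receipts"` a second time while emitting `groups_key` events; given the `SlavedGroupServerStore` added above, a `groups` stream guard would match the event key, so this looks like a copy-paste slip in the branch as mirrored. A trimmed sketch of the dispatch shape (not the complete mapping):

    # stream name -> (notifier key, which row attribute carries the ids)
    STREAM_NOTIFY_KEYS = {
        "push_rules": ("push_rules_key", "user_id"),
        "account_data": ("account_data_key", "user_id"),
        "tag_account_data": ("account_data_key", "user_id"),
        "receipts": ("receipt_key", "room_id"),
        "typing": ("typing_key", "room_id"),
    }

    def notify(notifier, stream_name, token, rows):
        key, attr = STREAM_NOTIFY_KEYS[stream_name]
        ids = [getattr(row, attr) for row in rows]
        kwargs = {"users": ids} if attr == "user_id" else {"rooms": ids}
        notifier.on_new_event(key, token, **kwargs)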
 def start(config_options):
 
@@ -500,37 +423,13 @@ def start(config_options):
     ss.setup()
     ss.start_listening(config.worker_listeners)
 
-    def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_datastore().start_profiling()
-        ss.replicate()
         ss.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
 
-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-synchrotron",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-synchrotron", config)
 
 
 if __name__ == '__main__':
synctl

@@ -38,7 +38,7 @@ def pid_running(pid):
     try:
         os.kill(pid, 0)
         return True
-    except OSError, err:
+    except OSError as err:
         if err.errno == errno.EPERM:
             return True
         return False
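For context on the function being touched: `os.kill(pid, 0)` sends no signal at all; it only performs the existence and permission checks, which makes it the standard probe for "is this PID alive". `EPERM` means the process exists but belongs to another user, which still counts as running. The change itself is just the Python 3 compatible `except OSError as err:` spelling. A standalone restatement of the idiom:

    import errno
    import os

    def pid_running(pid):
        try:
            os.kill(pid, 0)  # signal 0: existence/permission check only
            return True
        except OSError as err:
            # EPERM: the process exists but we may not signal it.
            return err.errno == errno.EPERM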
@@ -98,7 +98,7 @@ def stop(pidfile, app):
     try:
         os.kill(pid, signal.SIGTERM)
         write("stopped %s" % (app,), colour=GREEN)
-    except OSError, err:
+    except OSError as err:
         if err.errno == errno.ESRCH:
             write("%s not running" % (app,), colour=YELLOW)
         elif err.errno == errno.EPERM:
 
@@ -125,7 +125,7 @@ def main():
         "configfile",
         nargs="?",
         default="homeserver.yaml",
-        help="the homeserver config file, defaults to homserver.yaml",
+        help="the homeserver config file, defaults to homeserver.yaml",
     )
     parser.add_argument(
         "-w", "--worker",
 
@@ -184,6 +184,9 @@ def main():
         worker_configfiles.append(worker_configfile)
 
     if options.all_processes:
+        # To start the main synapse with -a you need to add a worker file
+        # with worker_app == "synapse.app.homeserver"
+        start_stop_synapse = False
         worker_configdir = options.all_processes
         if not os.path.isdir(worker_configdir):
             write(
 
@@ -200,10 +203,29 @@ def main():
         with open(worker_configfile) as stream:
             worker_config = yaml.load(stream)
         worker_app = worker_config["worker_app"]
-        worker_pidfile = worker_config["worker_pid_file"]
-        worker_daemonize = worker_config["worker_daemonize"]
-        assert worker_daemonize  # TODO print something more user friendly
-        worker_cache_factor = worker_config.get("synctl_cache_factor")
+        if worker_app == "synapse.app.homeserver":
+            # We need to special case all of this to pick up options that may
+            # be set in the main config file or in this worker config file.
+            worker_pidfile = (
+                worker_config.get("pid_file")
+                or pidfile
+            )
+            worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
+            daemonize = worker_config.get("daemonize") or config.get("daemonize")
+            assert daemonize, "Main process must have daemonize set to true"
+
+            # The master process doesn't support using worker_* config.
+            for key in worker_config:
+                if key == "worker_app":  # But we allow worker_app
+                    continue
+                assert not key.startswith("worker_"), \
+                    "Main process cannot use worker_* config"
+        else:
+            worker_pidfile = worker_config["worker_pid_file"]
+            worker_daemonize = worker_config["worker_daemonize"]
+            assert worker_daemonize, "In config %r: expected '%s' to be True" % (
+                worker_configfile, "worker_daemonize")
+            worker_cache_factor = worker_config.get("synctl_cache_factor")
         workers.append(Worker(
             worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
         ))
 
@@ -230,9 +252,13 @@ def main():
         for running_pid in running_pids:
             while pid_running(running_pid):
                 time.sleep(0.2)
+        write("All processes exited; now restarting...")
 
     if action == "start" or action == "restart":
         if start_stop_synapse:
+            # Check if synapse is already running
+            if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
+                abort("synapse.app.homeserver already running")
             start(configfile)
 
         for worker in workers:
232  synapse/app/user_dir.py  Normal file
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sys
+
+import synapse
+from synapse import events
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.http.server import JsonResource
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v2_alpha import user_directory
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.storage.user_directory import UserDirectoryStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, run_in_background
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor, defer
+from twisted.web.resource import NoResource
+
+logger = logging.getLogger("synapse.app.user_dir")
+
+
+class UserDirectorySlaveStore(
+    SlavedEventStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    SlavedClientIpStore,
+    UserDirectoryStore,
+    BaseSlavedStore,
+):
+    def __init__(self, db_conn, hs):
+        super(UserDirectorySlaveStore, self).__init__(db_conn, hs)
+
+        events_max = self._stream_id_gen.get_current_token()
+        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
+            db_conn, "current_state_delta_stream",
+            entity_column="room_id",
+            stream_column="stream_id",
+            max_value=events_max,  # As we share the stream id with events token
+            limit=1000,
+        )
+        self._curr_state_delta_stream_cache = StreamChangeCache(
+            "_curr_state_delta_stream_cache", min_curr_state_delta_id,
+            prefilled_cache=curr_state_delta_prefill,
+        )
+
+        self._current_state_delta_pos = events_max
+
+    def stream_positions(self):
+        result = super(UserDirectorySlaveStore, self).stream_positions()
+        result["current_state_deltas"] = self._current_state_delta_pos
+        return result
+
+    def process_replication_rows(self, stream_name, token, rows):
+        if stream_name == "current_state_deltas":
+            self._current_state_delta_pos = token
+            for row in rows:
+                self._curr_state_delta_stream_cache.entity_has_changed(
+                    row.room_id, token
+                )
+        return super(UserDirectorySlaveStore, self).process_replication_rows(
+            stream_name, token, rows
+        )
+
+
+class UserDirectoryServer(HomeServer):
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    user_directory.register_servlets(self, resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )
+
+        logger.info("Synapse user_dir now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return UserDirectoryReplicationHandler(self)
+
+
+class UserDirectoryReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
+        self.user_directory = hs.get_user_directory_handler()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(UserDirectoryReplicationHandler, self).on_rdata(
+            stream_name, token, rows
+        )
+        if stream_name == "current_state_deltas":
+            run_in_background(self._notify_directory)
+
+    @defer.inlineCallbacks
+    def _notify_directory(self):
+        try:
+            yield self.user_directory.notify_new_event()
+        except Exception:
+            logger.exception("Error notifying user directory of state update")
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse user directory", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.user_dir"
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    if config.update_user_directory:
+        sys.stderr.write(
+            "\nThe update_user_directory must be disabled in the main synapse process"
+            "\nbefore it can be run in a separate worker."
+            "\nPlease add ``update_user_directory: false`` to the main config"
+            "\n"
+        )
+        sys.exit(1)
+
+    # Force the user directory updater to start, since it will be disabled in
+    # the main config
+    config.update_user_directory = True
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ps = UserDirectoryServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ps.setup()
+    ps.start_listening(config.worker_listeners)
+
+    def start():
+        ps.get_datastore().start_profiling()
+        ps.get_state_handler().start_caching()
+
+    reactor.callWhenRunning(start)
+
+    _base.start_worker_reactor("synapse-user-dir", config)
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
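The worker above refuses to start unless worker_app is "synapse.app.user_dir", reads its HTTP listeners from worker_listeners, and insists that update_user_directory is disabled in the main process. A sketch of a matching worker config, parsed here with PyYAML; the port, bind address and file layout are illustrative assumptions, while the worker_app value and the client resource come from the code above:

import yaml

# A sketch of a worker config for the new app, based on the keys the
# code above actually reads (worker_app, worker_listeners).
WORKER_CONFIG = yaml.safe_load("""
worker_app: synapse.app.user_dir

worker_listeners:
  - type: http
    port: 8090                 # illustrative port
    bind_addresses: ['127.0.0.1']
    resources:
      - names: [client]
""")

# The worker asserts exactly this on startup; the main process must also
# have `update_user_directory: false` set.
assert WORKER_CONFIG["worker_app"] == "synapse.app.user_dir"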
synapse/appservice/__init__.py
@@ -14,12 +14,15 @@
 # limitations under the License.
 from synapse.api.constants import EventTypes
 from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.types import GroupID, get_domain_from_id

 from twisted.internet import defer

 import logging
 import re

+from six import string_types
+
 logger = logging.getLogger(__name__)
@@ -81,12 +84,13 @@ class ApplicationService(object):
     # values.
     NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

-    def __init__(self, token, url=None, namespaces=None, hs_token=None,
+    def __init__(self, token, hostname, url=None, namespaces=None, hs_token=None,
                  sender=None, id=None, protocols=None, rate_limited=True):
         self.token = token
         self.url = url
         self.hs_token = hs_token
         self.sender = sender
+        self.server_name = hostname
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id
@@ -125,8 +129,26 @@ class ApplicationService(object):
                 raise ValueError(
                     "Expected bool for 'exclusive' in ns '%s'" % ns
                 )
+            group_id = regex_obj.get("group_id")
+            if group_id:
+                if not isinstance(group_id, str):
+                    raise ValueError(
+                        "Expected string for 'group_id' in ns '%s'" % ns
+                    )
+                try:
+                    GroupID.from_string(group_id)
+                except Exception:
+                    raise ValueError(
+                        "Expected valid group ID for 'group_id' in ns '%s'" % ns
+                    )
+
+                if get_domain_from_id(group_id) != self.server_name:
+                    raise ValueError(
+                        "Expected 'group_id' to be this host in ns '%s'" % ns
+                    )
+
             regex = regex_obj.get("regex")
-            if isinstance(regex, basestring):
+            if isinstance(regex, string_types):
                 regex_obj["regex"] = re.compile(regex)  # Pre-compile regex
             else:
                 raise ValueError(
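The new checks accept an optional group_id on a user-namespace entry only if it is a string, parses as a group ID (of the form "+localpart:domain"), and is local to this homeserver. A hedged sketch of an entry that would pass all three checks (all names invented for illustration):

# An appservice user-namespace entry exercising the new validation,
# for a homeserver whose server_name is "example.com":
namespace_entry = {
    "exclusive": True,
    "regex": "@irc_.*:example.com",   # example values throughout
    "group_id": "+irc:example.com",   # string, valid group ID, local domain
}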
@@ -241,6 +263,31 @@ class ApplicationService(object):
     def is_exclusive_room(self, room_id):
         return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

+    def get_exlusive_user_regexes(self):
+        """Get the list of regexes used to determine if a user is exclusively
+        registered by the AS
+        """
+        return [
+            regex_obj["regex"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if regex_obj["exclusive"]
+        ]
+
+    def get_groups_for_user(self, user_id):
+        """Get the groups that this user is associated with by this AS
+
+        Args:
+            user_id (str): The ID of the user.
+
+        Returns:
+            iterable[str]: an iterable that yields group_id strings.
+        """
+        return (
+            regex_obj["group_id"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if "group_id" in regex_obj and regex_obj["regex"].match(user_id)
+        )
+
     def is_rate_limited(self):
         return self.rate_limited
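get_groups_for_user() yields the group_id of every user-namespace entry whose pre-compiled regex matches the user. A small usage sketch over hand-built namespace data of the shape _check_namespaces produces:

import re

# Namespaces as they look after _check_namespaces has pre-compiled 'regex'.
ns_users = [
    {"exclusive": True, "regex": re.compile("@irc_.*:example.com"),
     "group_id": "+irc:example.com"},
    {"exclusive": False, "regex": re.compile("@gitter_.*:example.com")},
]

# Equivalent to ApplicationService.get_groups_for_user("@irc_alice:example.com"):
groups = [
    obj["group_id"]
    for obj in ns_users
    if "group_id" in obj and obj["regex"].match("@irc_alice:example.com")
]
assert groups == ["+irc:example.com"]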
synapse/appservice/api.py
@@ -72,7 +72,8 @@ class ApplicationServiceApi(SimpleHttpClient):
         super(ApplicationServiceApi, self).__init__(hs)
         self.clock = hs.get_clock()

-        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
+        self.protocol_meta_cache = ResponseCache(hs, "as_protocol_meta",
+                                                 timeout_ms=HOUR_IN_MS)

     @defer.inlineCallbacks
     def query_user(self, service, user_id):
@@ -192,9 +193,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             defer.returnValue(None)

         key = (service.id, protocol)
-        return self.protocol_meta_cache.get(key) or (
-            self.protocol_meta_cache.set(key, _get())
-        )
+        return self.protocol_meta_cache.wrap(key, _get)

     @defer.inlineCallbacks
     def push_bulk(self, service, events, txn_id=None):
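The change above replaces the manual get()-or-set() pair with a single wrap() call, which invokes the callback only on a cache miss so concurrent callers share one in-flight result. A toy, non-Twisted stand-in for that get-or-compute shape (not Synapse's actual ResponseCache):

class SimpleResponseCache(object):
    """A toy version of the get-or-compute pattern behind wrap()."""

    def __init__(self):
        self._entries = {}

    def wrap(self, key, callback, *args, **kwargs):
        # On a hit, every caller shares the stored result; on a miss the
        # callback runs exactly once and its result is cached under key.
        if key not in self._entries:
            self._entries[key] = callback(*args, **kwargs)
        return self._entries[key]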
synapse/appservice/scheduler.py
@@ -51,7 +51,7 @@ components.
 from twisted.internet import defer

 from synapse.appservice import ApplicationServiceState
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import Measure

 import logging
@@ -106,7 +106,7 @@ class _ServiceQueuer(object):
     def enqueue(self, service, event):
         # if this service isn't being sent something
         self.queued_events.setdefault(service.id, []).append(event)
-        preserve_fn(self._send_request)(service)
+        run_in_background(self._send_request, service)

     @defer.inlineCallbacks
     def _send_request(self, service):
@@ -123,7 +123,7 @@ class _ServiceQueuer(object):
         with Measure(self.clock, "servicequeuer.send"):
             try:
                 yield self.txn_ctrl.send(service, events)
-            except:
+            except Exception:
                 logger.exception("AS request failed")
             finally:
                 self.requests_in_flight.discard(service.id)
@@ -152,10 +152,10 @@ class _TransactionController(object):
             if sent:
                 yield txn.complete(self.store)
             else:
-                preserve_fn(self._start_recoverer)(service)
-        except Exception as e:
-            logger.exception(e)
-            preserve_fn(self._start_recoverer)(service)
+                run_in_background(self._start_recoverer, service)
+        except Exception:
+            logger.exception("Error creating appservice transaction")
+            run_in_background(self._start_recoverer, service)

     @defer.inlineCallbacks
     def on_recovered(self, recoverer):
@@ -176,17 +176,20 @@ class _TransactionController(object):

     @defer.inlineCallbacks
     def _start_recoverer(self, service):
-        yield self.store.set_appservice_state(
-            service,
-            ApplicationServiceState.DOWN
-        )
-        logger.info(
-            "Application service falling behind. Starting recoverer. AS ID %s",
-            service.id
-        )
-        recoverer = self.recoverer_fn(service, self.on_recovered)
-        self.add_recoverers([recoverer])
-        recoverer.recover()
+        try:
+            yield self.store.set_appservice_state(
+                service,
+                ApplicationServiceState.DOWN
+            )
+            logger.info(
+                "Application service falling behind. Starting recoverer. AS ID %s",
+                service.id
+            )
+            recoverer = self.recoverer_fn(service, self.on_recovered)
+            self.add_recoverers([recoverer])
+            recoverer.recover()
+        except Exception:
+            logger.exception("Error starting AS recoverer")

     @defer.inlineCallbacks
     def _is_service_up(self, service):
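Both preserve_fn(f)(args) and run_in_background(f, args) fire f off without waiting for the result; the newer helper takes the function and its arguments in one call and standardises the logcontext handling around the fire-and-forget call. A toy stand-in showing just the calling-convention change (the real helper does logcontext bookkeeping this sketch omits):

# Old style: wrap the function, then call the wrapper.
#     preserve_fn(self._send_request)(service)
# New style: pass the function and its arguments in one call.
#     run_in_background(self._send_request, service)
def run_in_background(f, *args, **kwargs):
    # In Synapse this also captures and restores the logging context;
    # here we simply invoke the function.
    return f(*args, **kwargs)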
synapse/config/__init__.py
@@ -12,3 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from ._base import ConfigError
+
+# export ConfigError if somebody does import *
+# this is largely a fudge to stop PEP8 moaning about the import
+__all__ = ["ConfigError"]
|
|||||||
import yaml
|
import yaml
|
||||||
from textwrap import dedent
|
from textwrap import dedent
|
||||||
|
|
||||||
|
from six import integer_types
|
||||||
|
|
||||||
|
|
||||||
class ConfigError(Exception):
|
class ConfigError(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -49,7 +51,7 @@ Missing mandatory `server_name` config option.
|
|||||||
class Config(object):
|
class Config(object):
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def parse_size(value):
|
def parse_size(value):
|
||||||
if isinstance(value, int) or isinstance(value, long):
|
if isinstance(value, integer_types):
|
||||||
return value
|
return value
|
||||||
sizes = {"K": 1024, "M": 1024 * 1024}
|
sizes = {"K": 1024, "M": 1024 * 1024}
|
||||||
size = 1
|
size = 1
|
||||||
@@ -61,7 +63,7 @@ class Config(object):
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def parse_duration(value):
|
def parse_duration(value):
|
||||||
if isinstance(value, int) or isinstance(value, long):
|
if isinstance(value, integer_types):
|
||||||
return value
|
return value
|
||||||
second = 1000
|
second = 1000
|
||||||
minute = 60 * second
|
minute = 60 * second
|
||||||
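On Python 2, six.integer_types is (int, long), so the rewritten checks are equivalent to the old two-part isinstance test while remaining valid on Python 3, where it is just (int,). A self-contained version of parse_size; the K/M suffix arithmetic after the lines shown in the hunk is reconstructed here as an assumption:

from six import integer_types


def parse_size(value):
    # Accept ints (and Python 2 longs) as-is; otherwise treat the value
    # as a string with an optional K/M suffix.
    if isinstance(value, integer_types):
        return value
    sizes = {"K": 1024, "M": 1024 * 1024}
    size = 1
    suffix = value[-1]
    if suffix in sizes:
        value = value[:-1]
        size = sizes[suffix]
    return int(value) * size


assert parse_size(2048) == 2048
assert parse_size("10M") == 10 * 1024 * 1024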
@@ -81,22 +83,38 @@ class Config(object):
     def abspath(file_path):
         return os.path.abspath(file_path) if file_path else file_path

+    @classmethod
+    def path_exists(cls, file_path):
+        """Check if a file exists
+
+        Unlike os.path.exists, this throws an exception if there is an error
+        checking if the file exists (for example, if there is a perms error on
+        the parent dir).
+
+        Returns:
+            bool: True if the file exists; False if not.
+        """
+        try:
+            os.stat(file_path)
+            return True
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise e
+            return False
+
     @classmethod
     def check_file(cls, file_path, config_name):
         if file_path is None:
             raise ConfigError(
                 "Missing config for %s."
-                " You must specify a path for the config file. You can "
-                "do this with the -c or --config-path option. "
-                "Adding --generate-config along with --server-name "
-                "<server name> will generate a config file at the given path."
                 % (config_name,)
             )
-        if not os.path.exists(file_path):
+        try:
+            os.stat(file_path)
+        except OSError as e:
             raise ConfigError(
-                "File %s config for %s doesn't exist."
-                " Try running again with --generate-config"
-                % (file_path, config_name,)
+                "Error accessing file '%s' (config for %s): %s"
+                % (file_path, config_name, e.strerror)
             )
         return cls.abspath(file_path)
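The distinction path_exists() draws matters because os.path.exists() collapses every stat() failure to False, so a permissions error on the parent directory is indistinguishable from a missing file. A small demonstration (POSIX, run as a non-root user):

import errno
import os
import shutil
import stat
import tempfile

# Build a directory we cannot search, then probe a file inside it.
parent = tempfile.mkdtemp()
target = os.path.join(parent, "homeserver.yaml")
os.chmod(parent, 0)  # strip all permissions (effective for non-root users)

try:
    # os.path.exists() reports False whether the file is missing or the
    # parent is unreadable -- the two cases are indistinguishable.
    print(os.path.exists(target))  # False

    # A raw stat() distinguishes them: EACCES (perms) vs ENOENT (missing),
    # which is exactly the case path_exists refuses to swallow.
    try:
        os.stat(target)
    except OSError as e:
        print(errno.errorcode[e.errno])  # EACCES as non-root, ENOENT as root
finally:
    os.chmod(parent, stat.S_IRWXU)
    shutil.rmtree(parent)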
@@ -248,7 +266,7 @@ class Config(object):
                     " -c CONFIG-FILE\""
                 )
             (config_path,) = config_files
-            if not os.path.exists(config_path):
+            if not cls.path_exists(config_path):
                 if config_args.keys_directory:
                     config_dir_path = config_args.keys_directory
                 else:
@@ -261,33 +279,33 @@ class Config(object):
                         "Must specify a server_name to a generate config for."
                         " Pass -H server.name."
                     )
-                if not os.path.exists(config_dir_path):
+                if not cls.path_exists(config_dir_path):
                     os.makedirs(config_dir_path)
-                with open(config_path, "wb") as config_file:
-                    config_bytes, config = obj.generate_config(
+                with open(config_path, "w") as config_file:
+                    config_str, config = obj.generate_config(
                         config_dir_path=config_dir_path,
                         server_name=server_name,
                         report_stats=(config_args.report_stats == "yes"),
                         is_generating_file=True
                     )
                     obj.invoke_all("generate_files", config)
-                    config_file.write(config_bytes)
-                print (
+                    config_file.write(config_str)
+                print((
                     "A config file has been generated in %r for server name"
                     " %r with corresponding SSL keys and self-signed"
                     " certificates. Please review this file and customise it"
                     " to your needs."
-                ) % (config_path, server_name)
-                print (
+                ) % (config_path, server_name))
+                print(
                     "If this server name is incorrect, you will need to"
                     " regenerate the SSL certificates"
                 )
                 return
             else:
-                print (
+                print((
                     "Config file %r already exists. Generating any missing key"
                     " files."
-                ) % (config_path,)
+                ) % (config_path,))
                 generate_keys = True

             parser = argparse.ArgumentParser(
synapse/config/appservice.py
@@ -17,10 +17,12 @@ from ._base import Config, ConfigError
 from synapse.appservice import ApplicationService
 from synapse.types import UserID

-import urllib
 import yaml
 import logging

+from six import string_types
+from six.moves.urllib import parse as urlparse
+
 logger = logging.getLogger(__name__)
@@ -89,21 +91,21 @@ def _load_appservice(hostname, as_info, config_filename):
         "id", "as_token", "hs_token", "sender_localpart"
     ]
     for field in required_string_fields:
-        if not isinstance(as_info.get(field), basestring):
+        if not isinstance(as_info.get(field), string_types):
             raise KeyError("Required string field: '%s' (%s)" % (
                 field, config_filename,
             ))

     # 'url' must either be a string or explicitly null, not missing
     # to avoid accidentally turning off push for ASes.
-    if (not isinstance(as_info.get("url"), basestring) and
+    if (not isinstance(as_info.get("url"), string_types) and
             as_info.get("url", "") is not None):
         raise KeyError(
             "Required string field or explicit null: 'url' (%s)" % (config_filename,)
         )

     localpart = as_info["sender_localpart"]
-    if urllib.quote(localpart) != localpart:
+    if urlparse.quote(localpart) != localpart:
         raise ValueError(
             "sender_localpart needs characters which are not URL encoded."
         )
@@ -128,7 +130,7 @@ def _load_appservice(hostname, as_info, config_filename):
                 "Expected namespace entry in %s to be an object,"
                 " but got %s", ns, regex_obj
             )
-            if not isinstance(regex_obj.get("regex"), basestring):
+            if not isinstance(regex_obj.get("regex"), string_types):
                 raise ValueError(
                     "Missing/bad type 'regex' key in %s", regex_obj
                 )
@@ -154,6 +156,7 @@ def _load_appservice(hostname, as_info, config_filename):
     )
     return ApplicationService(
         token=as_info["as_token"],
+        hostname=hostname,
         url=as_info["url"],
         namespaces=as_info["namespaces"],
         hs_token=as_info["hs_token"],
synapse/config/cas.py
@@ -41,7 +41,7 @@ class CasConfig(Config):
         #cas_config:
         #   enabled: true
         #   server_url: "https://cas-server.com"
-        #   service_url: "https://homesever.domain.com:8448"
+        #   service_url: "https://homeserver.domain.com:8448"
         #   #required_attributes:
         #   #    name: value
         """
79  synapse/config/consent_config.py  Normal file
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+DEFAULT_CONFIG = """\
+# User Consent configuration
+#
+# Parts of this section are required if enabling the 'consent' resource under
+# 'listeners', in particular 'template_dir' and 'version'.
+#
+# 'template_dir' gives the location of the templates for the HTML forms.
+# This directory should contain one subdirectory per language (eg, 'en', 'fr'),
+# and each language directory should contain the policy document (named as
+# '<version>.html') and a success page (success.html).
+#
+# 'version' specifies the 'current' version of the policy document. It defines
+# the version to be served by the consent resource if there is no 'v'
+# parameter.
+#
+# 'server_notice_content', if enabled, will send a user a "Server Notice"
+# asking them to consent to the privacy policy. The 'server_notices' section
+# must also be configured for this to work.
+#
+# 'block_events_error', if set, will block any attempts to send events
+# until the user consents to the privacy policy. The value of the setting is
+# used as the text of the error.
+#
+# user_consent:
+#   template_dir: res/templates/privacy
+#   version: 1.0
+#   server_notice_content:
+#     msgtype: m.text
+#     body: >-
+#       To continue using this homeserver you must review and agree to the
+#       terms and conditions at %(consent_uri)s
+#   block_events_error: >-
+#     To continue using this homeserver you must review and agree to the
+#     terms and conditions at %(consent_uri)s
+#
+"""
+
+
+class ConsentConfig(Config):
+    def __init__(self):
+        super(ConsentConfig, self).__init__()
+
+        self.user_consent_version = None
+        self.user_consent_template_dir = None
+        self.user_consent_server_notice_content = None
+        self.block_events_without_consent_error = None
+
+    def read_config(self, config):
+        consent_config = config.get("user_consent")
+        if consent_config is None:
+            return
+        self.user_consent_version = str(consent_config["version"])
+        self.user_consent_template_dir = consent_config["template_dir"]
+        self.user_consent_server_notice_content = consent_config.get(
+            "server_notice_content",
+        )
+        self.block_events_without_consent_error = consent_config.get(
+            "block_events_error",
+        )
+
+    def default_config(self, **kwargs):
+        return DEFAULT_CONFIG
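ConsentConfig only requires 'version' and 'template_dir' once a user_consent section exists; the notice content and block error are optional get()s. A sketch of parsing the documented example with PyYAML, mirroring read_config:

import yaml

consent = yaml.safe_load("""
user_consent:
  template_dir: res/templates/privacy
  version: 1.0
  block_events_error: >-
    To continue using this homeserver you must review and agree to the
    terms and conditions at %(consent_uri)s
""")["user_consent"]

# Mirrors ConsentConfig.read_config: 'version' is coerced to a string so
# that the YAML float 1.0 and the "1.0.html" template name line up.
version = str(consent["version"])
template_dir = consent["template_dir"]
notice = consent.get("server_notice_content")   # None here: it's optional
error = consent.get("block_events_error")

assert version == "1.0" and notice is None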
synapse/config/emailconfig.py
@@ -71,6 +71,15 @@ class EmailConfig(Config):
             self.email_riot_base_url = email_config.get(
                 "riot_base_url", None
             )
+            self.email_smtp_user = email_config.get(
+                "smtp_user", None
+            )
+            self.email_smtp_pass = email_config.get(
+                "smtp_pass", None
+            )
+            self.require_transport_security = email_config.get(
+                "require_transport_security", False
+            )
             if "app_name" in email_config:
                 self.email_app_name = email_config["app_name"]
             else:
@@ -91,10 +100,17 @@ class EmailConfig(Config):
         # Defining a custom URL for Riot is only needed if email notifications
         # should contain links to a self-hosted installation of Riot; when set
         # the "app_name" setting is ignored.
+        #
+        # If your SMTP server requires authentication, the optional smtp_user &
+        # smtp_pass variables should be used
+        #
         #email:
         #   enable_notifs: false
         #   smtp_host: "localhost"
         #   smtp_port: 25
+        #   smtp_user: "exampleusername"
+        #   smtp_pass: "examplepassword"
+        #   require_transport_security: False
         #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
         #   app_name: Matrix
         #   template_dir: res/templates
32  synapse/config/groups.py  Normal file
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class GroupsConfig(Config):
+    def read_config(self, config):
+        self.enable_group_creation = config.get("enable_group_creation", False)
+        self.group_creation_prefix = config.get("group_creation_prefix", "")
+
+    def default_config(self, **kwargs):
+        return """\
+        # Whether to allow non server admins to create groups on this server
+        enable_group_creation: false
+
+        # If enabled, non server admins can only create groups with local parts
+        # starting with this prefix
+        # group_creation_prefix: "unofficial/"
+        """
synapse/config/homeserver.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from .tls import TlsConfig
 from .server import ServerConfig
 from .logger import LoggingConfig
@@ -33,6 +33,12 @@ from .jwt import JWTConfig
 from .password_auth_providers import PasswordAuthProviderConfig
 from .emailconfig import EmailConfig
 from .workers import WorkerConfig
+from .push import PushConfig
+from .spam_checker import SpamCheckerConfig
+from .groups import GroupsConfig
+from .user_directory import UserDirectoryConfig
+from .consent_config import ConsentConfig
+from .server_notices_config import ServerNoticesConfig


 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
@@ -40,12 +46,16 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                        JWTConfig, PasswordConfig, EmailConfig,
-                       WorkerConfig, PasswordAuthProviderConfig,):
+                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
+                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
+                       ConsentConfig,
+                       ServerNoticesConfig,
+                       ):
     pass


 if __name__ == '__main__':
     import sys
     sys.stdout.write(
-        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0]
+        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2], True)[0]
     )
synapse/config/key.py
@@ -59,14 +59,20 @@ class KeyConfig(Config):

         self.expire_access_token = config.get("expire_access_token", False)

+        # a secret which is used to calculate HMACs for form values, to stop
+        # falsification of values
+        self.form_secret = config.get("form_secret", None)
+
     def default_config(self, config_dir_path, server_name, is_generating_file=False,
                        **kwargs):
         base_key_name = os.path.join(config_dir_path, server_name)

         if is_generating_file:
             macaroon_secret_key = random_string_with_symbols(50)
+            form_secret = '"%s"' % random_string_with_symbols(50)
         else:
             macaroon_secret_key = None
+            form_secret = 'null'

         return """\
         macaroon_secret_key: "%(macaroon_secret_key)s"
@@ -74,6 +80,10 @@ class KeyConfig(Config):
         # Used to enable access token expiration.
         expire_access_token: False

+        # a secret which is used to calculate HMACs for form values, to stop
+        # falsification of values
+        form_secret: %(form_secret)s
+
         ## Signing Keys ##

         # Path to the signing key to sign messages with
@@ -118,10 +128,9 @@ class KeyConfig(Config):
         signing_keys = self.read_file(signing_key_path, "signing_key")
         try:
             return read_signing_keys(signing_keys.splitlines(True))
-        except Exception:
+        except Exception as e:
             raise ConfigError(
-                "Error reading signing_key."
-                " Try running again with --generate-config"
+                "Error reading signing_key: %s" % (str(e))
             )

     def read_old_signing_keys(self, old_signing_keys):
@@ -141,7 +150,8 @@ class KeyConfig(Config):

     def generate_files(self, config):
         signing_key_path = config["signing_key_path"]
-        if not os.path.exists(signing_key_path):
+
+        if not self.path_exists(signing_key_path):
             with open(signing_key_path, "w") as signing_key_file:
                 key_id = "a_" + random_string(4)
                 write_signing_keys(
synapse/config/logger.py
@@ -28,27 +28,27 @@ DEFAULT_LOG_CONFIG = Template("""
 version: 1

 formatters:
    precise:
-       format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s\
-- %(message)s'
+       format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
+%(request)s - %(message)s'

 filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

 handlers:
    file:
        class: logging.handlers.RotatingFileHandler
        formatter: precise
        filename: ${log_file}
        maxBytes: 104857600
        backupCount: 10
        filters: [context]
    console:
        class: logging.StreamHandler
        formatter: precise
        filters: [context]

 loggers:
    synapse:
@@ -74,17 +74,10 @@ class LoggingConfig(Config):
         self.log_file = self.abspath(config.get("log_file"))

     def default_config(self, config_dir_path, server_name, **kwargs):
-        log_file = self.abspath("homeserver.log")
         log_config = self.abspath(
             os.path.join(config_dir_path, server_name + ".log.config")
         )
         return """
-        # Logging verbosity level. Ignored if log_config is specified.
-        verbose: 0
-
-        # File to write logging to. Ignored if log_config is specified.
-        log_file: "%(log_file)s"
-
         # A yaml python logging config file
         log_config: "%(log_config)s"
         """ % locals()
@@ -123,9 +116,10 @@ class LoggingConfig(Config):
     def generate_files(self, config):
         log_config = config.get("log_config")
         if log_config and not os.path.exists(log_config):
-            with open(log_config, "wb") as log_config_file:
+            log_file = self.abspath("homeserver.log")
+            with open(log_config, "w") as log_config_file:
                 log_config_file.write(
-                    DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
+                    DEFAULT_LOG_CONFIG.substitute(log_file=log_file)
                 )
@@ -148,8 +142,11 @@ def setup_logging(config, use_worker_options=False):
         "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
         " - %(message)s"
     )
-    if log_config is None:
+
+    if log_config is None:
+        # We don't have a logfile, so fall back to the 'verbosity' param from
+        # the config or cmdline. (Note that we generate a log config for new
+        # installs, so this will be an unusual case)
         level = logging.INFO
         level_for_storage = logging.INFO
         if config.verbosity:
@@ -157,11 +154,10 @@ def setup_logging(config, use_worker_options=False):
         if config.verbosity > 1:
             level_for_storage = logging.DEBUG

-    # FIXME: we need a logging.WARN for a -q quiet option
     logger = logging.getLogger('')
     logger.setLevel(level)

-    logging.getLogger('synapse.storage').setLevel(level_for_storage)
+    logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

     formatter = logging.Formatter(log_format)
     if log_file:
@@ -176,6 +172,10 @@ def setup_logging(config, use_worker_options=False):
             logger.info("Opened new log file due to SIGHUP")
     else:
         handler = logging.StreamHandler()
+
+        def sighup(signum, stack):
+            pass
+
     handler.setFormatter(formatter)

     handler.addFilter(LoggingContextFilter(request=""))
synapse/config/password_auth_providers.py
@@ -13,44 +13,41 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ._base import Config, ConfigError
+from ._base import Config

-import importlib
+from synapse.util.module_loader import load_module
+
+LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider'


 class PasswordAuthProviderConfig(Config):
     def read_config(self, config):
         self.password_providers = []
+        providers = []

         # We want to be backwards compatible with the old `ldap_config`
         # param.
         ldap_config = config.get("ldap_config", {})
-        self.ldap_enabled = ldap_config.get("enabled", False)
-        if self.ldap_enabled:
-            from ldap_auth_provider import LdapAuthProvider
-            parsed_config = LdapAuthProvider.parse_config(ldap_config)
-            self.password_providers.append((LdapAuthProvider, parsed_config))
+        if ldap_config.get("enabled", False):
+            providers.append({
+                'module': LDAP_PROVIDER,
+                'config': ldap_config,
+            })

-        providers = config.get("password_providers", [])
+        providers.extend(config.get("password_providers", []))
         for provider in providers:
+            mod_name = provider['module']
+
             # This is for backwards compat when the ldap auth provider resided
             # in this package.
-            if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider":
-                from ldap_auth_provider import LdapAuthProvider
-                provider_class = LdapAuthProvider
-            else:
-                # We need to import the module, and then pick the class out of
-                # that, so we split based on the last dot.
-                module, clz = provider['module'].rsplit(".", 1)
-                module = importlib.import_module(module)
-                provider_class = getattr(module, clz)
-
-            try:
-                provider_config = provider_class.parse_config(provider["config"])
-            except Exception as e:
-                raise ConfigError(
-                    "Failed to parse config for %r: %r" % (provider['module'], e)
-                )
+            if mod_name == "synapse.util.ldap_auth_provider.LdapAuthProvider":
+                mod_name = LDAP_PROVIDER
+
+            (provider_class, provider_config) = load_module({
+                "module": mod_name,
+                "config": provider['config'],
+            })
+
             self.password_providers.append((provider_class, provider_config))

     def default_config(self, **kwargs):
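Judging by the inline code it replaces, load_module folds four steps into one helper: split the dotted path on the last dot, import the module, fetch the class, and call its parse_config. A sketch reconstructing that shape (an assumption based on the removed code; the real synapse.util.module_loader.load_module may differ, e.g. in error handling):

import importlib


def load_module(provider):
    """Load a '<module>.<Class>' provider and parse its config.

    A sketch reconstructing what the removed inline code did.
    """
    module_name, clz = provider["module"].rsplit(".", 1)
    module = importlib.import_module(module_name)
    provider_class = getattr(module, clz)
    provider_config = provider_class.parse_config(provider["config"])
    return provider_class, provider_config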
61  synapse/config/push.py  Normal file
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class PushConfig(Config):
+    def read_config(self, config):
+        push_config = config.get("push", {})
+        self.push_include_content = push_config.get("include_content", True)
+
+        # There was previously a 'redact_content' setting, but it was
+        # mistakenly read from the 'email' section. Check for the flag in the
+        # 'push' section, and log, but do not honour it to avoid nasty
+        # surprises when people upgrade.
+        if push_config.get("redact_content") is not None:
+            print(
+                "The push.redact_content option has never worked. "
+                "Please set push.include_content if you want this behaviour"
+            )
+
+        # Now check for the one in the 'email' section and honour it,
+        # with a warning.
+        push_config = config.get("email", {})
+        redact_content = push_config.get("redact_content")
+        if redact_content is not None:
+            print(
+                "The 'email.redact_content' option is deprecated: "
+                "please set push.include_content instead"
+            )
+            self.push_include_content = not redact_content
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Clients requesting push notifications can either have the body of
+        # the message sent in the notification poke along with other details
+        # like the sender, or just the event ID and room ID (`event_id_only`).
+        # If clients choose the former, this option controls whether the
+        # notification request includes the content of the event (other details
+        # like the sender are still included). For `event_id_only` push, it
+        # has no effect.
+
+        # For modern Android devices the notification content will still appear
+        # because it is loaded by the app. iPhone, however, will send a
+        # notification saying only that a message arrived and who it came from.
+        #
+        #push:
+        #   include_content: true
+        """
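The net precedence in read_config above: push.include_content defaults to True, push.redact_content is only warned about, and a legacy email.redact_content is honoured (inverted) even over an explicit push.include_content. A compact check of those cases:

# Effective value of push_include_content for a few configs, following
# the precedence in PushConfig.read_config above:
cases = [
    ({}, True),                                         # default: include content
    ({"push": {"include_content": False}}, False),      # explicit new option
    ({"email": {"redact_content": True}}, False),       # legacy option, inverted
    ({"push": {"include_content": True},
      "email": {"redact_content": True}}, False),       # legacy still overrides
]

for config, expected in cases:
    include = config.get("push", {}).get("include_content", True)
    redact = config.get("email", {}).get("redact_content")
    if redact is not None:
        include = not redact
    assert include == expected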
synapse/config/registration.py
@@ -31,6 +31,8 @@ class RegistrationConfig(Config):
             strtobool(str(config["disable_registration"]))
         )

+        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
+        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
         self.registration_shared_secret = config.get("registration_shared_secret")

         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
@@ -41,6 +43,8 @@ class RegistrationConfig(Config):
             self.allow_guest_access and config.get("invite_3pid_guest", False)
         )

+        self.auto_join_rooms = config.get("auto_join_rooms", [])
+
     def default_config(self, **kwargs):
         registration_shared_secret = random_string_with_symbols(50)

@@ -50,13 +54,32 @@ class RegistrationConfig(Config):
         # Enable registration for new users.
         enable_registration: False

+        # The user must provide all of the below types of 3PID when registering.
+        #
+        # registrations_require_3pid:
+        #     - email
+        #     - msisdn
+
+        # Mandate that users are only allowed to associate certain formats of
+        # 3PIDs with accounts on this server.
+        #
+        # allowed_local_3pids:
+        #     - medium: email
+        #       pattern: ".*@matrix\\.org"
+        #     - medium: email
+        #       pattern: ".*@vector\\.im"
+        #     - medium: msisdn
+        #       pattern: "\\+44"

         # If set, allows registration by anyone who also has the shared
         # secret, even if registration is otherwise disabled.
         registration_shared_secret: "%(registration_shared_secret)s"

         # Set the number of bcrypt rounds used to generate password hash.
         # Larger numbers increase the work factor needed to generate the hash.
-        # The default number of rounds is 12.
+        # The default number is 12 (which equates to 2^12 rounds).
+        # N.B. that increasing this will exponentially increase the time required
+        # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
         bcrypt_rounds: 12

         # Allows users to register as guests without a password/email/etc, and
@@ -69,6 +92,12 @@ class RegistrationConfig(Config):
         trusted_third_party_id_servers:
             - matrix.org
             - vector.im
+            - riot.im

+        # Users who register on this homeserver will automatically be joined
+        # to these rooms
+        #auto_join_rooms:
+        #    - "#example:example.com"
         """ % locals()

     def add_arguments(self, parser):
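bcrypt's cost parameter is logarithmic: a rounds value of r means 2**r iterations, so the new comment's numbers follow directly, since each extra round doubles the work. A quick arithmetic check (the 0.3 s baseline is an illustrative figure, not a measurement):

# bcrypt's cost parameter is a log2 work factor: 'rounds' r means 2**r
# iterations, so every +1 doubles hashing time.
default_rounds = 12
big_rounds = 24
assert 2 ** big_rounds // 2 ** default_rounds == 4096

# If a hash at 12 rounds takes ~0.3 s, 24 rounds takes ~0.3 * 4096 s,
# i.e. roughly 20 minutes, matching the comment's ">20 mins" warning.
print(0.3 * 4096 / 60, "minutes")  # ~20.5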
synapse/config/repository.py
@@ -16,6 +16,8 @@
 from ._base import Config, ConfigError
 from collections import namedtuple

+from synapse.util.module_loader import load_module
+

 MISSING_NETADDR = (
     "Missing netaddr library. This is required for URL preview API."
@@ -36,6 +38,14 @@ ThumbnailRequirement = namedtuple(
     "ThumbnailRequirement", ["width", "height", "method", "media_type"]
 )

+MediaStorageProviderConfig = namedtuple(
+    "MediaStorageProviderConfig", (
+        "store_local",  # Whether to store newly uploaded local files
+        "store_remote",  # Whether to store newly downloaded remote files
+        "store_synchronous",  # Whether to wait for successful storage for local uploads
+    ),
+)
+

 def parse_thumbnail_requirements(thumbnail_sizes):
     """ Takes a list of dictionaries with "width", "height", and "method" keys
@@ -70,7 +80,64 @@ class ContentRepositoryConfig(Config):
         self.max_upload_size = self.parse_size(config["max_upload_size"])
         self.max_image_pixels = self.parse_size(config["max_image_pixels"])
         self.max_spider_size = self.parse_size(config["max_spider_size"])

         self.media_store_path = self.ensure_directory(config["media_store_path"])

+        backup_media_store_path = config.get("backup_media_store_path")
+
+        synchronous_backup_media_store = config.get(
+            "synchronous_backup_media_store", False
+        )
+
+        storage_providers = config.get("media_storage_providers", [])
+
+        if backup_media_store_path:
+            if storage_providers:
+                raise ConfigError(
+                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
+                )
+
+            storage_providers = [{
+                "module": "file_system",
+                "store_local": True,
+                "store_synchronous": synchronous_backup_media_store,
+                "store_remote": True,
+                "config": {
+                    "directory": backup_media_store_path,
+                }
+            }]
+
+        # This is a list of config that can be used to create the storage
+        # providers. The entries are tuples of (Class, class_config,
+        # MediaStorageProviderConfig), where Class is the class of the provider,
+        # the class_config the config to pass to it, and
+        # MediaStorageProviderConfig are options for StorageProviderWrapper.
+        #
+        # We don't create the storage providers here as not all workers need
+        # them to be started.
+        self.media_storage_providers = []
+
+        for provider_config in storage_providers:
+            # We special case the module "file_system" so as not to need to
+            # expose FileStorageProviderBackend
+            if provider_config["module"] == "file_system":
+                provider_config["module"] = (
+                    "synapse.rest.media.v1.storage_provider"
+                    ".FileStorageProviderBackend"
+                )
+
+            provider_class, parsed_config = load_module(provider_config)
+
+            wrapper_config = MediaStorageProviderConfig(
+                provider_config.get("store_local", False),
+                provider_config.get("store_remote", False),
+                provider_config.get("store_synchronous", False),
+            )
+
+            self.media_storage_providers.append(
+                (provider_class, parsed_config, wrapper_config,)
+            )
+
         self.uploads_path = self.ensure_directory(config["uploads_path"])
         self.dynamic_thumbnails = config["dynamic_thumbnails"]
         self.thumbnail_requirements = parse_thumbnail_requirements(
@@ -115,6 +182,20 @@ class ContentRepositoryConfig(Config):
         # Directory where uploaded images and attachments are stored.
         media_store_path: "%(media_store)s"

+        # Media storage providers allow media to be stored in different
+        # locations.
+        # media_storage_providers:
+        # - module: file_system
+        #   # Whether to write new local files.
+        #   store_local: false
+        #   # Whether to write new remote media
+        #   store_remote: false
+        #   # Whether to block upload requests waiting for write to this
+        #   # provider to complete
+        #   store_synchronous: false
+        #   config:
+        #     directory: /mnt/some/other/directory
+
         # Directory where in-progress uploads are stored.
         uploads_path: "%(uploads_path)s"
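The `media_storage_providers` machinery above loads arbitrary provider classes via `load_module`, passing each one the `config:` dict from the YAML. As a rough illustration only: a hypothetical provider module shaped the way `load_module` expects (a class with a static `parse_config`); the `store_file`/`fetch` method names mirror the file_system backend's interface as I understand it and should be treated as an assumption, as should the class name.

    import os
    import shutil

    class ExampleStorageProvider(object):
        """Hypothetical target of `module: my_module.ExampleStorageProvider`."""

        def __init__(self, hs, config):
            # `config` is whatever parse_config() returned below
            self.base_directory = config

        def store_file(self, path, file_info):
            # mirror a locally cached file into the secondary location
            backup_fname = os.path.join(self.base_directory, path)
            dirname = os.path.dirname(backup_fname)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            shutil.copyfile(path, backup_fname)

        def fetch(self, path, file_info):
            # return None when we don't hold the file, so other
            # providers (or the local cache) can be consulted instead
            backup_fname = os.path.join(self.base_directory, path)
            if os.path.exists(backup_fname):
                return open(backup_fname, "rb")

        @staticmethod
        def parse_config(config):
            # receives the `config:` dict from the YAML block
            return config["directory"]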
synapse/config/server.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,12 +30,42 @@ class ServerConfig(Config):
         self.user_agent_suffix = config.get("user_agent_suffix")
         self.use_frozen_dicts = config.get("use_frozen_dicts", False)
         self.public_baseurl = config.get("public_baseurl")
+        self.cpu_affinity = config.get("cpu_affinity")

         # Whether to send federation traffic out in this process. This only
         # applies to some federation traffic, and so shouldn't be used to
         # "disable" federation
         self.send_federation = config.get("send_federation", True)

+        # Whether to update the user directory or not. This should be set to
+        # false only if we are updating the user directory in a worker
+        self.update_user_directory = config.get("update_user_directory", True)
+
+        # whether to enable the media repository endpoints. This should be set
+        # to false if the media repository is running as a separate endpoint;
+        # doing so ensures that we will not run cache cleanup jobs on the
+        # master, potentially causing inconsistency.
+        self.enable_media_repo = config.get("enable_media_repo", True)
+
+        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)
+
+        # Whether we should block invites sent to users on this server
+        # (other than those sent by local server admins)
+        self.block_non_admin_invites = config.get(
+            "block_non_admin_invites", False,
+        )
+
+        # FIXME: federation_domain_whitelist needs sytests
+        self.federation_domain_whitelist = None
+        federation_domain_whitelist = config.get(
+            "federation_domain_whitelist", None
+        )
+        # turn the whitelist into a hash for speed of lookup
+        if federation_domain_whitelist is not None:
+            self.federation_domain_whitelist = {}
+            for domain in federation_domain_whitelist:
+                self.federation_domain_whitelist[domain] = True
+
         if self.public_baseurl is not None:
             if self.public_baseurl[-1] != '/':
                 self.public_baseurl += '/'
@@ -141,9 +172,36 @@ class ServerConfig(Config):
         # When running as a daemon, the file to store the pid in
         pid_file: %(pid_file)s

+        # CPU affinity mask. Setting this restricts the CPUs on which the
+        # process will be scheduled. It is represented as a bitmask, with the
+        # lowest order bit corresponding to the first logical CPU and the
+        # highest order bit corresponding to the last logical CPU. Not all CPUs
+        # may exist on a given system but a mask may specify more CPUs than are
+        # present.
+        #
+        # For example:
+        #    0x00000001 is processor #0,
+        #    0x00000003 is processors #0 and #1,
+        #    0xFFFFFFFF is all processors (#0 through #31).
+        #
+        # Pinning a Python process to a single CPU is desirable, because Python
+        # is inherently single-threaded due to the GIL, and can suffer a
+        # 30-40%% slowdown due to cache blow-out and thread context switching
+        # if the scheduler happens to schedule the underlying threads across
+        # different cores. See
+        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
+        #
+        # cpu_affinity: 0xFFFFFFFF
+
         # Whether to serve a web client from the HTTP/HTTPS root resource.
         web_client: True

+        # The root directory to serve for the above web client.
+        # If left undefined, synapse will serve the matrix-angular-sdk web client.
+        # Make sure matrix-angular-sdk is installed with pip if web_client is True
+        # and web_client_location is undefined
+        # web_client_location: "/path/to/web/root"
+
         # The public-facing base URL for the client API (not including _matrix/...)
         # public_baseurl: https://example.com:8448/
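The code that actually applies `cpu_affinity` is not part of this hunk, but decoding the mask is straightforward. A sketch using `os.sched_setaffinity` (Linux-only, standard library) as a stand-in for whatever affinity helper the process uses:

    import os

    def apply_cpu_affinity_mask(mask):
        # expand the bitmask into CPU indices, e.g.
        # 0x00000001 -> {0}, 0x00000003 -> {0, 1}
        cpus = {i for i in range(mask.bit_length()) if mask & (1 << i)}
        # pid 0 means "the calling process"
        os.sched_setaffinity(0, cpus)

    apply_cpu_affinity_mask(0x00000001)  # pin to processor #0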
@@ -155,6 +213,25 @@ class ServerConfig(Config):
         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
         # gc_thresholds: [700, 10, 10]

+        # Set the limit on the returned events in the timeline in the get
+        # and sync operations. The default value is -1, which means no upper limit.
+        # filter_timeline_limit: 5000
+
+        # Whether room invites to users on this server should be blocked
+        # (except those sent by local server admins). The default is False.
+        # block_non_admin_invites: True
+
+        # Restrict federation to the following whitelist of domains.
+        # N.B. we recommend also firewalling your federation listener to limit
+        # inbound federation traffic as early as possible, rather than relying
+        # purely on this application-layer restriction. If not specified, the
+        # default is to whitelist everything.
+        #
+        # federation_domain_whitelist:
+        #  - lon.example.com
+        #  - nyc.example.com
+        #  - syd.example.com
+
         # List of ports that Synapse should listen on, their purpose and their
         # configuration.
         listeners:
@@ -165,13 +242,12 @@ class ServerConfig(Config):
           port: %(bind_port)s

           # Local addresses to listen on.
-          # This will listen on all IPv4 addresses by default.
+          # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
+          # addresses by default. For most other OSes, this will only listen
+          # on IPv6.
           bind_addresses:
+            - '::'
             - '0.0.0.0'
-          # Uncomment to listen on all IPv6 interfaces
-          # N.B: On at least Linux this will also listen on all IPv4
-          # addresses, so you will need to comment out the line above.
-          # - '::'

           # This is a 'http' listener, allows us to specify 'resources'.
           type: http
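The `bind_addresses` change works because, on dual-stack systems, a socket bound to `::` also accepts IPv4 connections as IPv4-mapped IPv6 addresses; that behaviour is governed by the IPV6_V6ONLY socket option, whose default differs across OSes (hence the Linux/macOS caveat in the new comment). A standalone socket demonstration, independent of Synapse/Twisted:

    import socket

    sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    # 0 = dual-stack (IPv4 peers appear as ::ffff:a.b.c.d); 1 = IPv6-only.
    # Linux defaults to 0; Windows and some BSDs default to 1.
    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
    sock.bind(("::", 8008))
    sock.listen(5)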
@@ -198,11 +274,18 @@ class ServerConfig(Config):
             - names: [federation]  # Federation APIs
               compress: false

+        # optional list of additional endpoints which can be loaded via
+        # dynamic modules
+        # additional_resources:
+        #   "/_matrix/my/custom/endpoint":
+        #     module: my_module.CustomRequestHandler
+        #     config: {}
+
         # Unsecure HTTP listener,
         # For when matrix traffic passes through loadbalancer that unwraps TLS.
         - port: %(unsecure_port)s
           tls: false
-          bind_addresses: ['0.0.0.0']
+          bind_addresses: ['::', '0.0.0.0']
           type: http

           x_forwarded: false
@@ -216,7 +299,7 @@ class ServerConfig(Config):
         # Turn on the twisted ssh manhole service on localhost on the given
         # port.
         # - port: 9000
-        #   bind_address: 127.0.0.1
+        #   bind_addresses: ['::1', '127.0.0.1']
         #   type: manhole
         """ % locals()
@@ -254,7 +337,7 @@ def read_gc_thresholds(thresholds):
         return (
             int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
         )
-    except:
+    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )
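`read_gc_thresholds` parses the `gc_thresholds: [700, 10, 10]` setting shown earlier; the fix above matters because a bare `except:` would also swallow `KeyboardInterrupt` and `SystemExit`. For reference, what the resulting tuple does when handed to CPython's collector:

    import gc

    # collect generation 0 once allocations minus deallocations exceed 700;
    # collect generation 1 after every 10 gen-0 collections, and
    # generation 2 after every 10 gen-1 collections
    gc.set_threshold(700, 10, 10)
    print(gc.get_threshold())  # (700, 10, 10)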
synapse/config/server_notices_config.py (new file, 86 lines)
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from ._base import Config
+from synapse.types import UserID
+
+DEFAULT_CONFIG = """\
+# Server Notices room configuration
+#
+# Uncomment this section to enable a room which can be used to send notices
+# from the server to users. It is a special room which cannot be left; notices
+# come from a special "notices" user id.
+#
+# If you uncomment this section, you *must* define the system_mxid_localpart
+# setting, which defines the id of the user which will be used to send the
+# notices.
+#
+# It's also possible to override the room name, the display name of the
+# "notices" user, and the avatar for the user.
+#
+# server_notices:
+#   system_mxid_localpart: notices
+#   system_mxid_display_name: "Server Notices"
+#   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
+#   room_name: "Server Notices"
+"""
+
+
+class ServerNoticesConfig(Config):
+    """Configuration for the server notices room.
+
+    Attributes:
+      server_notices_mxid (str|None):
+        The MXID to use for server notices.
+        None if server notices are not enabled.
+
+      server_notices_mxid_display_name (str|None):
+        The display name to use for the server notices user.
+        None if server notices are not enabled.
+
+      server_notices_mxid_avatar_url (str|None):
+        The avatar URL to use for the server notices user.
+        None if server notices are not enabled.
+
+      server_notices_room_name (str|None):
+        The name to use for the server notices room.
+        None if server notices are not enabled.
+    """
+    def __init__(self):
+        super(ServerNoticesConfig, self).__init__()
+        self.server_notices_mxid = None
+        self.server_notices_mxid_display_name = None
+        self.server_notices_mxid_avatar_url = None
+        self.server_notices_room_name = None
+
+    def read_config(self, config):
+        c = config.get("server_notices")
+        if c is None:
+            return
+
+        mxid_localpart = c['system_mxid_localpart']
+        self.server_notices_mxid = UserID(
+            mxid_localpart, self.server_name,
+        ).to_string()
+        self.server_notices_mxid_display_name = c.get(
+            'system_mxid_display_name', None,
+        )
+        self.server_notices_mxid_avatar_url = c.get(
+            'system_mxid_avatar_url', None,
+        )
+        # todo: i18n
+        self.server_notices_room_name = c.get('room_name', "Server Notices")
+
+    def default_config(self, **kwargs):
+        return DEFAULT_CONFIG
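`read_config` above combines the mandatory `system_mxid_localpart` with the homeserver's own `server_name` to form the notices user's MXID. The `UserID` type just produces the standard `@localpart:domain` form; a plain-string equivalent for illustration:

    def notices_mxid(localpart, server_name):
        # equivalent to UserID(localpart, server_name).to_string()
        return "@%s:%s" % (localpart, server_name)

    assert notices_mxid("notices", "example.com") == "@notices:example.com"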
synapse/config/spam_checker.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.module_loader import load_module
+
+from ._base import Config
+
+
+class SpamCheckerConfig(Config):
+    def read_config(self, config):
+        self.spam_checker = None
+
+        provider = config.get("spam_checker", None)
+        if provider is not None:
+            self.spam_checker = load_module(provider)
+
+    def default_config(self, **kwargs):
+        return """\
+# spam_checker:
+#     module: "my_custom_project.SuperSpamChecker"
+#     config:
+#         example_option: 'things'
+"""
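`load_module` resolves the configured `module` to a class and feeds it the parsed `config`. A hypothetical checker matching the sample config above; the `check_event_for_spam` method name reflects the spam-checker interface of this era as I recall it, so treat it (and everything else here) as an assumption rather than a documented contract:

    class SuperSpamChecker(object):
        """Hypothetical target of `module: "my_custom_project.SuperSpamChecker"`."""

        def __init__(self, config):
            self._banned_phrase = config["example_option"]

        def check_event_for_spam(self, event):
            # returning True asks synapse to reject the event (assumed interface)
            body = event.get("content", {}).get("body", "")
            return self._banned_phrase in body

        @staticmethod
        def parse_config(config):
            # load_module() calls this with the `config:` dict from the YAML
            return config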
synapse/config/tls.py
@@ -96,7 +96,7 @@ class TlsConfig(Config):
         # certificates returned by this server match one of the fingerprints.
         #
         # Synapse automatically adds the fingerprint of its own certificate
-        # to the list. So if federation traffic is handle directly by synapse
+        # to the list. So if federation traffic is handled directly by synapse
         # then no modification to the list is required.
         #
         # If synapse is run behind a load balancer that handles the TLS then it
@@ -109,6 +109,12 @@ class TlsConfig(Config):
         # key. It may be necessary to publish the fingerprints of a new
         # certificate and wait until the "valid_until_ts" of the previous key
         # responses have passed before deploying it.
+        #
+        # You can calculate a fingerprint from a given TLS listener via:
+        # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
+        #    openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
+        # or by checking matrix.org/federationtester/api/report?server_name=$host
+        #
         tls_fingerprints: []
         # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
         """ % locals()
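The openssl pipeline in the new comment can be reproduced with the Python standard library alone; a small sketch computing the same unpadded-base64 SHA-256 of the DER-encoded certificate:

    import base64
    import hashlib
    import ssl

    def tls_fingerprint(host, port):
        pem = ssl.get_server_certificate((host, port))
        # same as `openssl x509 -outform DER | openssl sha256 -binary`
        der = ssl.PEM_cert_to_DER_cert(pem)
        digest = hashlib.sha256(der).digest()
        # base64 with trailing '=' padding stripped, as in `tr -d '='`
        return base64.b64encode(digest).rstrip(b"=")

    # e.g. tls_fingerprint("matrix.org", 8448)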
@@ -126,8 +132,8 @@ class TlsConfig(Config):
         tls_private_key_path = config["tls_private_key_path"]
         tls_dh_params_path = config["tls_dh_params_path"]

-        if not os.path.exists(tls_private_key_path):
-            with open(tls_private_key_path, "w") as private_key_file:
+        if not self.path_exists(tls_private_key_path):
+            with open(tls_private_key_path, "wb") as private_key_file:
                 tls_private_key = crypto.PKey()
                 tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
                 private_key_pem = crypto.dump_privatekey(
@@ -141,8 +147,8 @@ class TlsConfig(Config):
                     crypto.FILETYPE_PEM, private_key_pem
                 )

-        if not os.path.exists(tls_certificate_path):
-            with open(tls_certificate_path, "w") as certificate_file:
+        if not self.path_exists(tls_certificate_path):
+            with open(tls_certificate_path, "wb") as certificate_file:
                 cert = crypto.X509()
                 subject = cert.get_subject()
                 subject.CN = config["server_name"]
@@ -159,7 +165,7 @@ class TlsConfig(Config):

                 certificate_file.write(cert_pem)

-        if not os.path.exists(tls_dh_params_path):
+        if not self.path_exists(tls_dh_params_path):
             if GENERATE_DH_PARAMS:
                 subprocess.check_call([
                     "openssl", "dhparam",
synapse/config/user_directory.py (new file, 44 lines)
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class UserDirectoryConfig(Config):
+    """User Directory Configuration
+    Configuration for the behaviour of the /user_directory API
+    """
+
+    def read_config(self, config):
+        self.user_directory_search_all_users = False
+        user_directory_config = config.get("user_directory", None)
+        if user_directory_config:
+            self.user_directory_search_all_users = (
+                user_directory_config.get("search_all_users", False)
+            )
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # User Directory configuration
+        #
+        # 'search_all_users' defines whether to search all users visible to your HS
+        # when searching the user directory, rather than limiting to users visible
+        # in public rooms. Defaults to false. If you set it True, you'll have to run
+        # UPDATE user_directory_stream_pos SET stream_id = NULL;
+        # on your database to tell it to rebuild the user_directory search indexes.
+        #
+        #user_directory:
+        #   search_all_users: false
+        """
synapse/config/voip.py
@@ -23,6 +23,7 @@ class VoipConfig(Config):
         self.turn_username = config.get("turn_username")
         self.turn_password = config.get("turn_password")
         self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
+        self.turn_allow_guests = config.get("turn_allow_guests", True)

     def default_config(self, **kwargs):
         return """\
@@ -41,4 +42,11 @@ class VoipConfig(Config):

         # How long generated TURN credentials last
         turn_user_lifetime: "1h"
+
+        # Whether guests should be allowed to use the TURN server.
+        # This defaults to True, otherwise VoIP will be unreliable for guests.
+        # However, it does introduce a slight security risk as it allows users to
+        # connect to arbitrary endpoints without having first signed up for a
+        # valid account (e.g. by passing a CAPTCHA).
+        turn_allow_guests: True
         """
synapse/config/workers.py
@@ -23,12 +23,30 @@ class WorkerConfig(Config):

     def read_config(self, config):
         self.worker_app = config.get("worker_app")
+
+        # Canonicalise worker_app so that master always has None
+        if self.worker_app == "synapse.app.homeserver":
+            self.worker_app = None
+
         self.worker_listeners = config.get("worker_listeners")
         self.worker_daemonize = config.get("worker_daemonize")
         self.worker_pid_file = config.get("worker_pid_file")
         self.worker_log_file = config.get("worker_log_file")
         self.worker_log_config = config.get("worker_log_config")
-        self.worker_replication_url = config.get("worker_replication_url")
+
+        # The host used to connect to the main synapse
+        self.worker_replication_host = config.get("worker_replication_host", None)
+
+        # The port on the main synapse for TCP replication
+        self.worker_replication_port = config.get("worker_replication_port", None)
+
+        # The port on the main synapse for HTTP replication endpoint
+        self.worker_replication_http_port = config.get("worker_replication_http_port")
+
+        self.worker_name = config.get("worker_name", self.worker_app)
+
+        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
+        self.worker_cpu_affinity = config.get("worker_cpu_affinity")

         if self.worker_listeners:
             for listener in self.worker_listeners:
synapse/crypto/context_factory.py
@@ -13,8 +13,8 @@
 # limitations under the License.

 from twisted.internet import ssl
-from OpenSSL import SSL
-from twisted.internet._sslverify import _OpenSSLECCurve, _defaultCurveName
+from OpenSSL import SSL, crypto
+from twisted.internet._sslverify import _defaultCurveName

 import logging
@@ -32,9 +32,10 @@ class ServerContextFactory(ssl.ContextFactory):
     @staticmethod
     def configure_context(context, config):
         try:
-            _ecCurve = _OpenSSLECCurve(_defaultCurveName)
-            _ecCurve.addECKeyToContext(context)
-        except:
+            _ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
+            context.set_tmp_ecdh(_ecCurve)
+
+        except Exception:
             logger.exception("Failed to enable elliptic curve for TLS")
         context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
         context.use_certificate_chain_file(config.tls_certificate_file)
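The replacement drops Twisted's private `_OpenSSLECCurve` helper (removed from recent Twisted versions) in favour of public pyOpenSSL API: `crypto.get_elliptic_curve` looks a curve up by name and `Context.set_tmp_ecdh` installs it for ECDHE key exchange. A standalone sketch of those two calls, assuming `_defaultCurveName` resolves to `prime256v1`:

    from OpenSSL import SSL, crypto

    context = SSL.Context(SSL.TLSv1_2_METHOD)
    curve = crypto.get_elliptic_curve(u"prime256v1")
    # enable ephemeral ECDH on this context using the chosen curve
    context.set_tmp_ecdh(curve)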
synapse/crypto/event_signing.py
@@ -32,18 +32,25 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     """Check whether the hash for this PDU matches the contents"""
     name, expected_hash = compute_content_hash(event, hash_algorithm)
     logger.debug("Expecting hash: %s", encode_base64(expected_hash))
-    if name not in event.hashes:
+
+    # some malformed events lack a 'hashes'. Protect against it being missing
+    # or a weird type by basically treating it the same as an unhashed event.
+    hashes = event.get("hashes")
+    if not isinstance(hashes, dict):
+        raise SynapseError(400, "Malformed 'hashes'", Codes.UNAUTHORIZED)
+
+    if name not in hashes:
         raise SynapseError(
             400,
             "Algorithm %s not in hashes %s" % (
-                name, list(event.hashes),
+                name, list(hashes),
             ),
             Codes.UNAUTHORIZED,
         )
-    message_hash_base64 = event.hashes[name]
+    message_hash_base64 = hashes[name]
     try:
         message_hash_bytes = decode_base64(message_hash_base64)
-    except:
+    except Exception:
         raise SynapseError(
             400,
             "Invalid base64: %s" % (message_hash_base64,),
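For context on what the corrected check compares against: a Matrix content hash is the SHA-256 of the event's canonical JSON after stripping the keys excluded from hashing (`signatures`, `unsigned` and any existing `hashes`). A simplified sketch using the `canonicaljson` package; this is an outline of the spec'd algorithm, not Synapse's exact `compute_content_hash`:

    import hashlib

    from canonicaljson import encode_canonical_json

    def content_hash(event_dict):
        stripped = dict(event_dict)
        # these keys are excluded from the content hash
        for key in ("signatures", "unsigned", "hashes"):
            stripped.pop(key, None)
        event_json = encode_canonical_json(stripped)
        return "sha256", hashlib.sha256(event_json).digest()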
synapse/crypto/keyclient.py
@@ -13,14 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from synapse.util import logcontext
 from twisted.web.http import HTTPClient
 from twisted.internet.protocol import Factory
 from twisted.internet import defer, reactor
 from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util.logcontext import (
-    preserve_context_over_fn, preserve_context_over_deferred
-)
 import simplejson as json
 import logging
@@ -43,14 +40,10 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):

     for i in range(5):
         try:
-            protocol = yield preserve_context_over_fn(
-                endpoint.connect, factory
-            )
-            server_response, server_certificate = yield preserve_context_over_deferred(
-                protocol.remote_key
-            )
-            defer.returnValue((server_response, server_certificate))
-            return
+            with logcontext.PreserveLoggingContext():
+                protocol = yield endpoint.connect(factory)
+                server_response, server_certificate = yield protocol.remote_key
+                defer.returnValue((server_response, server_certificate))
         except SynapseKeyClientError as e:
             logger.exception("Error getting key for %r" % (server_name,))
             if e.status.startswith("4"):
Some files were not shown because too many files have changed in this diff.