Mirror of https://github.com/element-hq/synapse.git
Synced 2025-12-19 02:20:44 +00:00

Compare commits (3043 commits)
[Commit table (Author | SHA1 | Date): only abbreviated SHA1 values survived extraction, from 3c099219e0 through 34449cfc6c; the Author, Date, and commit-message cells are empty in the source.]
|
|
a4632783fb | ||
|
|
24772ba56e | ||
|
|
eeda4e618c | ||
|
|
d24197bead | ||
|
|
c6bbad109b | ||
|
|
16dc9064d4 | ||
|
|
63772443e6 | ||
|
|
a3f6576084 | ||
|
|
7fc2b5c063 | ||
|
|
89e3e39d52 | ||
|
|
2938a00825 | ||
|
|
5219f7e060 | ||
|
|
93ebeb2aa8 | ||
|
|
c1b077cd19 | ||
|
|
06cc0bb762 | ||
|
|
64c6566980 | ||
|
|
8fd4d9129f | ||
|
|
9164bfa1c3 | ||
|
|
9084720993 | ||
|
|
80d5d3baa1 | ||
|
|
b1c27975d0 | ||
|
|
dc155f4c2c | ||
|
|
2746e805fe | ||
|
|
0aeb1324b7 | ||
|
|
760469c812 | ||
|
|
f09d2b692f | ||
|
|
4c3eb14d68 | ||
|
|
3bb3f02517 |
.dockerignore (new file, 5 lines)
@@ -0,0 +1,5 @@
+Dockerfile
+.travis.yml
+.gitignore
+demo/etc
+tox.ini
.github/ISSUE_TEMPLATE.md (new file, 47 lines)
@@ -0,0 +1,47 @@
+<!--
+
+**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
+You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
+
+
+This is a bug report template. By following the instructions below and
+filling out the sections with your information, you will help us to get all
+the necessary data to fix your issue.
+
+You can also preview your report before submitting it. You may remove sections
+that aren't relevant to your particular case.
+
+Text between <!-- and --> marks will be invisible in the report.
+
+-->
+
+### Description
+
+Describe here the problem that you are experiencing, or the feature you are requesting.
+
+### Steps to reproduce
+
+- For bugs, list the steps
+- that reproduce the bug
+- using hyphens as bullet points
+
+Describe how what happens differs from what you expected.
+
+If you can identify any relevant log snippets from _homeserver.log_, please include
+those here (please be careful to remove any personal or private data):
+
+### Version information
+
+<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
+
+- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
+
+  If not matrix.org:
+  - **Version**: What version of Synapse is running? <!--
+    You can find the Synapse version by inspecting the server headers (replace matrix.org with
+    your own homeserver domain):
+    $ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
+    -->
+  - **Install method**: package manager/git clone/pip
+  - **Platform**: Tell us about the environment in which your homeserver is operating
+    - distro, hardware, if it's running in a vm/container, etc.
.gitignore
@@ -32,6 +32,7 @@ demo/media_store.*
 demo/etc
 
 uploads
+cache
 
 .idea/
 media_store/
@@ -46,3 +47,5 @@ static/client/register/register_config.js
 
 env/
 *.config
+
+.vscode/
.travis.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
+sudo: false
+language: python
+
+# tell travis to cache ~/.cache/pip
+cache: pip
+
+matrix:
+  include:
+    - python: 2.7
+      env: TOX_ENV=packaging
+
+    - python: 2.7
+      env: TOX_ENV=pep8
+
+    - python: 2.7
+      env: TOX_ENV=py27
+
+    - python: 3.6
+      env: TOX_ENV=py36
+
+install:
+  - pip install tox
+
+script:
+  - tox -e $TOX_ENV
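The same tox environments can be reproduced outside of Travis. A minimal sketch, assuming ``tox`` and the relevant Python interpreters are installed locally:

    pip install tox
    tox -e pep8
    tox -e py27

Each ``-e`` name corresponds to one ``TOX_ENV`` value from the matrix above.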
@@ -60,3 +60,6 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>
 
 Christoph Witzany <christoph at web.crofting.com>
  * Add LDAP support for authentication
+
+Pierre Jaury <pierre at jaury.eu>
+ * Docker packaging
CHANGES.rst (1042 lines changed) - file diff suppressed because it is too large.
@@ -30,8 +30,12 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use Jenkins for continuous integration (http://matrix.org/jenkins), and
-typically all pull requests get automatically tested Jenkins: if your change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org so please lurk there and keep an eye open.
+We use `Jenkins <http://matrix.org/jenkins>`_ and
+`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
+integration. All pull requests to synapse get automatically tested by Travis;
+the Jenkins builds require an administrator to start them. If your change
+breaks the build, this will be shown in github, so please keep an eye on the
+pull request for feedback.
 
 Code style
 ~~~~~~~~~~
Dockerfile (new file, 19 lines)
@@ -0,0 +1,19 @@
+FROM docker.io/python:2-alpine3.7
+
+RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev libressl-dev libjpeg-turbo-dev linux-headers postgresql-dev
+
+COPY . /synapse
+
+# A wheel cache may be provided in ./cache for faster build
+RUN cd /synapse \
+ && pip install --upgrade pip setuptools psycopg2 \
+ && mkdir -p /synapse/cache \
+ && pip install -f /synapse/cache --upgrade --process-dependency-links . \
+ && mv /synapse/contrib/docker/start.py /synapse/contrib/docker/conf / \
+ && rm -rf setup.py setup.cfg synapse
+
+VOLUME ["/data"]
+
+EXPOSE 8008/tcp 8448/tcp
+
+ENTRYPOINT ["/start.py"]
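As a sketch of how this image might be built and run (the tag ``synapse`` and the host path ``/srv/synapse-data`` are arbitrary examples, and depending on what ``start.py`` expects, additional environment variables may be required):

    docker build -t synapse .
    docker run -d --name synapse \
        -v /srv/synapse-data:/data \
        -p 8008:8008 -p 8448:8448 \
        synapse

The ``-v`` mount backs the ``VOLUME ["/data"]`` declared above, and the two ``-p`` flags publish the ports named in ``EXPOSE``.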
@@ -25,6 +25,9 @@ recursive-include synapse/static *.js
 exclude jenkins.sh
 exclude jenkins*.sh
 exclude jenkins*
+exclude Dockerfile
+exclude .dockerignore
 recursive-exclude jenkins *.sh
 
+prune .github
 prune demo/etc
README.rst (698 lines changed)
@@ -20,12 +20,13 @@ The overall architecture is::
       https://somewhere.org/_matrix      https://elsewhere.net/_matrix
 
 ``#matrix:matrix.org`` is the official support room for Matrix, and can be
-accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
-bridge at irc://irc.freenode.net/matrix.
+accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
+via IRC bridge at irc://irc.freenode.net/matrix.
 
 Synapse is currently in rapid development, but as of version 0.5 we believe it
 is sufficiently stable to be run as an internet-facing service for real usage!
 
+
 About Matrix
 ============
 
@@ -52,10 +53,10 @@ generation of fully open and interoperable messaging and VoIP apps for the
 internet.
 
 Synapse is a reference "homeserver" implementation of Matrix from the core
-development team at matrix.org, written in Python/Twisted for clarity and
-simplicity. It is intended to showcase the concept of Matrix and let folks see
-the spec in the context of a codebase and let you run your own homeserver and
-generally help bootstrap the ecosystem.
+development team at matrix.org, written in Python/Twisted. It is intended to
+showcase the concept of Matrix and let folks see the spec in the context of a
+codebase and let you run your own homeserver and generally help bootstrap the
+ecosystem.
 
 In Matrix, every user runs one or more Matrix clients, which connect through to
 a Matrix homeserver. The homeserver stores all their personal chat history and
@@ -66,26 +67,16 @@ hosted by someone else (e.g. matrix.org) - there is no single point of control
 or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
 etc.
 
-Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
-web client demo implemented in AngularJS) and cmdclient (a basic Python
-command line utility which lets you easily see what the JSON APIs are up to).
-
-Meanwhile, iOS and Android SDKs and clients are available from:
-
-- https://github.com/matrix-org/matrix-ios-sdk
-- https://github.com/matrix-org/matrix-ios-kit
-- https://github.com/matrix-org/matrix-ios-console
-- https://github.com/matrix-org/matrix-android-sdk
 
 We'd like to invite you to join #matrix:matrix.org (via
-https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
-Matrix spec at https://matrix.org/docs/spec and API docs at
-https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
-report any bugs via https://matrix.org/jira.
+https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
+at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
+`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
+<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
 
 Thanks for using Matrix!
 
-[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
+[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.
 
 
 Synapse Installation
 ====================
@@ -93,11 +84,17 @@ Synapse Installation
 Synapse is the reference python/twisted Matrix homeserver implementation.
 
 System requirements:
 
 - POSIX-compliant system (tested on Linux & OS X)
 - Python 2.7
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
-Synapse is written in python but some of the libraries is uses are written in
+Installing from source
+----------------------
+(Prebuilt packages are available for some platforms - see `Platform-Specific
+Instructions`_.)
+
+Synapse is written in python but some of the libraries it uses are written in
 C. So before we can install synapse itself we need a working C compiler and the
 header files for python C extensions.
 
@@ -112,10 +109,10 @@ Installing prerequisites on ArchLinux::
     sudo pacman -S base-devel python2 python-pip \
                    python-setuptools python-virtualenv sqlite3
 
-Installing prerequisites on CentOS 7::
+Installing prerequisites on CentOS 7 or Fedora 25::
 
     sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                     lcms2-devel libwebp-devel tcl-devel tk-devel \
+                     lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
                      python-virtualenv libffi-devel openssl-devel
     sudo yum groupinstall "Development Tools"
 
@@ -124,6 +121,7 @@ Installing prerequisites on Mac OS X::
     xcode-select --install
     sudo easy_install pip
    sudo pip install virtualenv
+    brew install pkg-config libffi
 
 Installing prerequisites on Raspbian::
 
@@ -140,10 +138,16 @@ Installing prerequisites on openSUSE::
     sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
                    python-devel libffi-devel libopenssl-devel libjpeg62-devel
 
+Installing prerequisites on OpenBSD::
+
+    doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
+                 libxslt
+
 To install the synapse homeserver run::
 
     virtualenv -p python2.7 ~/.synapse
     source ~/.synapse/bin/activate
+    pip install --upgrade pip
     pip install --upgrade setuptools
     pip install https://github.com/matrix-org/synapse/tarball/master
 
@@ -151,38 +155,74 @@ This installs synapse, along with the libraries it uses, into a virtual
 environment under ``~/.synapse``. Feel free to pick a different directory
 if you prefer.
 
-In case of problems, please see the _Troubleshooting section below.
+In case of problems, please see the _`Troubleshooting` section below.
 
-Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
-above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate the
+above in Docker at https://hub.docker.com/r/avhost/docker-matrix/tags/
 
 Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
 tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
 for details.
 
-To set up your homeserver, run (in your virtualenv, as before)::
+Configuring synapse
+-------------------
+
+Before you can start Synapse, you will need to generate a configuration
+file. To do this, run (in your virtualenv, as before)::
 
     cd ~/.synapse
     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name my.domain.name \
        --config-path homeserver.yaml \
        --generate-config \
        --report-stats=[yes|no]
 
-...substituting your host and domain name as appropriate.
+... substituting an appropriate value for ``--server-name``. The server name
+determines the "domain" part of user-ids for users on your server: these will
+all be of the format ``@user:my.domain.name``. It also determines how other
+matrix servers will reach yours for `Federation`_. For a test configuration,
+set this to the hostname of your server. For a more production-ready setup, you
+will probably want to specify your domain (``example.com``) rather than a
+matrix-specific hostname here (in the same way that your email address is
+probably ``user@example.com`` rather than ``user@email.example.com``) - but
+doing so may require more advanced setup - see `Setting up
+Federation`_. Beware that the server name cannot be changed later.
 
-This will generate you a config file that you can then customise, but it will
+This command will generate you a config file that you can then customise, but it will
 also generate a set of keys for you. These keys will allow your Home Server to
 identify itself to other Home Servers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. If, for whatever reason, you do need to
+wise to back them up somewhere safe. (If, for whatever reason, you do need to
 change your Home Server's keys, you may find that other Home Servers have the
 old key cached. If you update the signing key, you should change the name of the
-key in the <server name>.signing.key file (the second word) to something different.
+key in the ``<server name>.signing.key`` file (the second word) to something
+different. See `the spec`__ for more information on key management.)
 
-By default, registration of new users is disabled. You can either enable
-registration in the config by specifying ``enable_registration: true``
-(it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
-you can use the command line to register new users::
+.. __: `key_management`_
+
+The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
+configured without TLS; it should be behind a reverse proxy for TLS/SSL
+termination on port 443 which in turn should be used for clients. Port 8448
+is configured to use TLS with a self-signed certificate. If you would like
+to do an initial test with a client without having to set up a reverse proxy,
+you can temporarily use another certificate. (Note that a self-signed
+certificate is fine for `Federation`_.) You can do so by changing
+``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
+in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
+to read `Using a reverse proxy with Synapse`_ when doing so.
+
+Apart from port 8448 using TLS, both ports are the same in the default
+configuration.
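For illustration, a sketch of the three settings in ``homeserver.yaml`` (the file paths below are hypothetical examples, not defaults)::

    tls_certificate_path: "/etc/synapse/my.domain.name.tls.crt"
    tls_private_key_path: "/etc/synapse/my.domain.name.tls.key"
    tls_dh_params_path: "/etc/synapse/my.domain.name.tls.dh"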
 
+Registering a user
+------------------
+
+You will need at least one user on your server in order to use a Matrix
+client. Users can be registered either `via a Matrix client`__, or via a
+commandline script.
+
+.. __: `client-user-reg`_
+
+To get started, it is easiest to use the command line to register new users::
+
     $ source ~/.synapse/bin/activate
     $ synctl start # if not already running
@@ -190,10 +230,41 @@ you can use the command line to register new users::
     New user localpart: erikj
     Password:
     Confirm password:
+    Make admin [no]:
     Success!
 
+This process uses a setting ``registration_shared_secret`` in
+``homeserver.yaml``, which is shared between Synapse itself and the
+``register_new_matrix_user`` script. It doesn't matter what it is (a random
+value is generated by ``--generate-config``), but it should be kept secret, as
+anyone with knowledge of it can register users on your server even if
+``enable_registration`` is ``false``.
+
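As a sketch, the corresponding ``homeserver.yaml`` entries look like this (the secret shown is a placeholder, not a generated value)::

    enable_registration: false
    registration_shared_secret: "replace-with-a-long-random-string"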
+Setting up a TURN server
+------------------------
+
 For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server. See docs/turn-howto.rst for details.
+a TURN server. See `<docs/turn-howto.rst>`_ for details.
+
+IPv6
+----
+
+As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
+for providing PR #1696.
+
+However, for federation to work on hosts with IPv6 DNS servers you **must**
+be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
+for details. We can't make Synapse depend on Twisted 17.1 by default
+yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
+so if you are using operating system dependencies you'll have to install your
+own Twisted 17.1 package via pip or backports etc.
+
+If you're running in a virtualenv then pip should have installed the newest
+Twisted automatically, but if your virtualenv is old you will need to manually
+upgrade to a newer Twisted dependency via:
+
+    pip install Twisted>=17.1.0
+
+
 Running Synapse
 ===============
@@ -205,11 +276,60 @@ run (e.g. ``~/.synapse``), and::
     source ./bin/activate
     synctl start
 
+
+Connecting to Synapse from a client
+===================================
+
+The easiest way to try out your new Synapse installation is by connecting to it
+from a web client. The easiest option is probably the one at
+http://riot.im/app. You will need to specify a "Custom server" when you log on
+or register: set this to ``https://domain.tld`` if you set up a reverse proxy
+following the recommended setup, or ``https://localhost:8448`` - remember to specify the
+port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
+server as the default - see `Identity servers`_.)
+
+If using port 8448 you will run into errors until you accept the self-signed
+certificate. You can easily do this by going to ``https://localhost:8448``
+directly with your browser and accepting the presented certificate. You can then
+go back in your web client and proceed further.
+
+If all goes well you should at least be able to log in, create a room, and
+start sending messages.
+
+(The homeserver runs a web client by default at https://localhost:8448/, though
+as of the time of writing it is somewhat outdated and not really recommended -
+https://github.com/matrix-org/synapse/issues/1527).
+
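As a quick smoke test from the command line (a sketch assuming a local install on the default port; ``-k`` accepts the self-signed certificate)::

    $ curl -k https://localhost:8448/_matrix/client/versions

Any JSON response listing supported spec versions means the server is answering.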
+.. _`client-user-reg`:
+
+Registering a new user from a client
+------------------------------------
+
+By default, registration of new users via Matrix clients is disabled. To enable
+it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
+recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)
+
+Once ``enable_registration`` is set to ``true``, it is possible to register a
+user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
+
+Your new user name will be formed partly from the ``server_name`` (see
+`Configuring synapse`_), and partly from a localpart you specify when you
+create the account. Your name will take the form of::
+
+    @localpart:my.domain.name
+
+(pronounced "at localpart on my dot domain dot name").
+
+As when logging in, you will need to specify a "Custom server". Specify your
+desired ``localpart`` in the 'User name' box.
+
 
 Security Note
 =============
 
-Matrix serves raw user generated data in some APIs - specifically the content
-repository endpoints: http://matrix.org/docs/spec/client_server/r0.2.0.html#get-matrix-media-r0-download-servername-mediaid
+Matrix serves raw user generated data in some APIs - specifically the `content
+repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
 
 Whilst we have tried to mitigate against possible XSS attacks (e.g.
 https://github.com/matrix-org/synapse/pull/1021) we recommend running
 matrix homeservers on a dedicated domain name, to limit any malicious user generated
@@ -220,26 +340,8 @@ server on the same domain.
 See https://github.com/vector-im/vector-web/issues/1977 and
 https://developer.github.com/changes/2014-04-25-user-content-security for more details.
 
-Using PostgreSQL
-================
-
-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
-
-The advantages of Postgres include:
-
-* significant performance improvements due to the superior threading and
-  caching model, smarter query optimiser
-* allowing the DB to be run on separate hardware
-* allowing basic active/backup high-availability with a "hot spare" synapse
-  pointing at the same DB master, as well as enabling DB replication in
-  synapse itself.
-
-For information on how to install and use PostgreSQL, please see
-`docs/postgres.rst <docs/postgres.rst>`_.
-
-Platform Specific Instructions
+Platform-Specific Instructions
 ==============================
 
 Debian
@@ -247,21 +349,27 @@ Debian
 
 Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
 Note that these packages do not include a client - choose one from
-https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)
+https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)
 
 Fedora
 ------
 
+Synapse is in the Fedora repositories as ``matrix-synapse``::
+
+    sudo dnf install matrix-synapse
+
 Oleg Girko provides Fedora RPMs at
 https://obs.infoserver.lv/project/monitor/matrix-synapse
 
 ArchLinux
 ---------
 
-The quickest way to get up and running with ArchLinux is probably with Ivan
-Shapovalov's AUR package from
-https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
-the necessary dependencies.
+The quickest way to get up and running with ArchLinux is probably with the community package
+https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
+the necessary dependencies. If the default web client is to be served (enabled by default in
+the generated config),
+https://www.archlinux.org/packages/community/any/python2-matrix-angular-sdk/ will also need to
+be installed.
 
 Alternatively, to install using pip a few changes may be needed as ArchLinux
 defaults to python 3, but synapse currently assumes python 2.7 by default:
@@ -298,9 +406,35 @@ FreeBSD
 
 Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
 
-- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
+- Ports: ``cd /usr/ports/net-im/py-matrix-synapse && make install clean``
 - Packages: ``pkg install py27-matrix-synapse``
 
+
+OpenBSD
+-------
+
+There is currently no port for OpenBSD. Additionally, OpenBSD's security
+settings require a slightly more difficult installation process.
+
+1) Create a new directory in ``/usr/local`` called ``_synapse``. Also, create a
+   new user called ``_synapse`` and set that directory as the new user's home.
+   This is required because, by default, OpenBSD only allows binaries which need
+   write and execute permissions on the same memory space to be run from
+   ``/usr/local``.
+2) ``su`` to the new ``_synapse`` user and change to their home directory.
+3) Create a new virtualenv: ``virtualenv -p python2.7 ~/.synapse``
+4) Source the virtualenv configuration located at
+   ``/usr/local/_synapse/.synapse/bin/activate``. This is done in ``ksh`` by
+   using the ``.`` command, rather than ``bash``'s ``source``.
+5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
+   webpages for their titles.
+6) Use ``pip`` to install this repository: ``pip install
+   https://github.com/matrix-org/synapse/tarball/master``
+7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
+   chance of a compromised Synapse server being used to take over your box.
+
+After this, you may proceed with the rest of the install directions.
+
 NixOS
 -----
 
@@ -340,6 +474,7 @@ Troubleshooting:
 you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
 it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
 
+
 Troubleshooting
 ===============
 
@@ -403,6 +538,30 @@ fix try re-installing from PyPI or directly from
     # Install from github
     pip install --user https://github.com/pyca/pynacl/tarball/master
 
+Running out of File Handles
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If synapse runs out of filehandles, it typically fails badly - live-locking
+at 100% CPU, and/or failing to accept new TCP connections (blocking the
+connecting client). Matrix currently can legitimately use a lot of file handles,
+thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
+servers. The first time a server talks in a room it will try to connect
+simultaneously to all participating servers, which could exhaust the available
+file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
+to respond. (We need to improve the routing algorithm used to be better than
+full mesh, but as of June 2017 this hasn't happened yet).
+
+If you hit this failure mode, we recommend increasing the maximum number of
+open file handles to be at least 4096 (assuming a default of 1024 or 256).
+This is typically done by editing ``/etc/security/limits.conf``.
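A sketch of the relevant ``limits.conf`` lines (assuming synapse runs as a user named ``synapse``)::

    synapse    soft    nofile    4096
    synapse    hard    nofile    4096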
+
+Separately, Synapse may leak file handles if inbound HTTP requests get stuck
+during processing - e.g. blocked behind a lock or talking to a remote server etc.
+This is best diagnosed by matching up the 'Received request' and 'Processed request'
+log lines and looking for any 'Processed request' lines which take more than
+a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
+you see this failure mode so we can help debug it, however.
+
 ArchLinux
 ~~~~~~~~~
 
@@ -413,37 +572,6 @@ you will need to explicitly call Python2.7 - either running as::
 
 ...or by editing synctl with the correct python executable.
 
-Synapse Development
-===================
-
-To check out a synapse for development, clone the git repo into a working
-directory of your choice::
-
-    git clone https://github.com/matrix-org/synapse.git
-    cd synapse
-
-Synapse has a number of external dependencies, that are easiest
-to install using pip and a virtualenv::
-
-    virtualenv env
-    source env/bin/activate
-    python synapse/python_dependencies.py | xargs -n1 pip install
-    pip install setuptools_trial mock
-
-This will run a process of downloading and installing all the needed
-dependencies into a virtual env.
-
-Once this is done, you may wish to run Synapse's unit tests, to
-check that everything is installed as it should be::
-
-    python setup.py test
-
-This should end with a 'PASSED' result::
-
-    Ran 143 tests in 0.601s
-
-    PASSED (successes=143)
-
 
 Upgrading an existing Synapse
 =============================
@@ -454,143 +582,262 @@ versions of synapse.
 
 .. _UPGRADE.rst: UPGRADE.rst
 
+.. _federation:
+
 Setting up Federation
 =====================
 
-In order for other homeservers to send messages to your server, it will need to
-be publicly visible on the internet, and they will need to know its host name.
-You have two choices here, which will influence the form of your Matrix user
-IDs:
+Federation is the process by which users on different servers can participate
+in the same room. For this to work, those other servers must be able to contact
+yours to send messages.
 
-1) Use the machine's own hostname as available on public DNS in the form of
-   its A records. This is easier to set up initially, perhaps for
-   testing, but lacks the flexibility of SRV.
+As explained in `Configuring synapse`_, the ``server_name`` in your
+``homeserver.yaml`` file determines the way that other servers will reach
+yours. By default, they will treat it as a hostname and try to connect to
+port 8448. This is easy to set up and will work with the default configuration,
+provided you set the ``server_name`` to match your machine's public DNS
+hostname.
 
-2) Set up a SRV record for your domain name. This requires you create a SRV
-   record in DNS, but gives the flexibility to run the server on your own
-   choice of TCP port, on a machine that might not be the same name as the
-   domain name.
+For a more flexible configuration, you can set up a DNS SRV record. This allows
+you to run your server on a machine that might not have the same name as your
+domain name. For example, you might want to run your server at
+``synapse.example.com``, but have your Matrix user-ids look like
+``@user:example.com``. (A SRV record also allows you to change the port from
+the default 8448. However, if you are thinking of using a reverse-proxy on the
+federation port, which is not recommended, be sure to read
+`Reverse-proxying the federation port`_ first.)
 
-For the first form, simply pass the required hostname (of the machine) as the
---server-name parameter::
+To use a SRV record, first create your SRV record and publish it in DNS. This
+should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
+<synapse.server.name>``. The DNS record should then look something like::
+
+    $ dig -t srv _matrix._tcp.example.com
+    _matrix._tcp.example.com. 3600    IN      SRV     10 0 8448 synapse.example.com.
+
+Note that the server hostname cannot be an alias (CNAME record): it has to point
+directly to the server hosting the synapse instance.
+
+You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
+its user-ids, by setting ``server_name``::
 
     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name <yourdomain.com> \
        --config-path homeserver.yaml \
        --generate-config
    python -m synapse.app.homeserver --config-path homeserver.yaml
 
-Alternatively, you can run ``synctl start`` to guide you through the process.
-
-For the second form, first create your SRV record and publish it in DNS. This
-needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
-and port where the server is running. (At the current time synapse does not
-support clustering multiple servers into a single logical homeserver). The DNS
-record would then look something like::
-
-    $ dig -t srv _matrix._tcp.machine.my.domain.name
-    _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
-
-At this point, you should then run the homeserver with the hostname of this
-SRV record, as that is the name other machines will expect it to have::
-
-    python -m synapse.app.homeserver \
-        --server-name YOURDOMAIN \
-        --config-path homeserver.yaml \
-        --generate-config
-    python -m synapse.app.homeserver --config-path homeserver.yaml
-
-If you've already generated the config file, you need to edit the "server_name"
-in you ```homeserver.yaml``` file. If you've already started Synapse and a
+If you've already generated the config file, you need to edit the ``server_name``
+in your ``homeserver.yaml`` file. If you've already started Synapse and a
 database has been created, you will have to recreate the database.
 
-You may additionally want to pass one or more "-v" options, in order to
-increase the verbosity of logging output; at least for initial testing.
+If all goes well, you should be able to `connect to your server with a client`__,
+and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
+step. "Matrix HQ"'s sheer size and activity level tends to make even the
+largest boxes pause for thought.)
+
+.. __: `Connecting to Synapse from a client`_
+
+Troubleshooting
+---------------
+
+You can use the federation tester to check if your homeserver is all set:
+``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``
+If any of the attributes under "checks" is false, federation won't work.
+
+The typical failure mode with federation is that when you try to join a room,
+it is rejected with "401: Unauthorized". Generally this means that other
+servers in the room couldn't access yours. (Joining a room over federation is a
+complicated dance which requires connections in both directions).
+
+So, things to check are:
+
+* If you are trying to use a reverse-proxy, read `Reverse-proxying the
+  federation port`_.
+* If you are not using a SRV record, check that your ``server_name`` (the part
+  of your user-id after the ``:``) matches your hostname, and that port 8448 on
+  that hostname is reachable from outside your network.
+* If you *are* using a SRV record, check that it matches your ``server_name``
+  (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
+  it specifies are reachable from outside your network.
 
 Running a Demo Federation of Synapses
 -------------------------------------
 
 If you want to get up and running quickly with a trio of homeservers in a
-private federation (``localhost:8080``, ``localhost:8081`` and
-``localhost:8082``) which you can then access through the webclient running at
-http://localhost:8080. Simply run::
-
-    demo/start.sh
-
-This is mainly useful just for development purposes.
-
-Running The Demo Web Client
-===========================
-
-The homeserver runs a web client by default at https://localhost:8448/.
-
-If this is the first time you have used the client from that browser (it uses
-HTML5 local storage to remember its config), you will need to log in to your
-account. If you don't yet have an account, because you've just started the
-homeserver for the first time, then you'll need to register one.
-
-
-Registering A New Account
--------------------------
+private federation, there is a script in the ``demo`` directory. This is mainly
+useful just for development purposes. See `<demo/README>`_.
 
-Your new user name will be formed partly from the hostname your server is
-running as, and partly from a localpart you specify when you create the
-account. Your name will take the form of::
+Using PostgreSQL
+================
 
-    @localpart:my.domain.here
-
-(pronounced "at localpart on my dot domain dot here")
+As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+traditionally used for convenience and simplicity.
 
-Specify your desired localpart in the topmost box of the "Register for an
-account" form, and click the "Register" button. Hostnames can contain ports if
-required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
-internal synapse sandbox running on localhost).
+The advantages of Postgres include:
+
+* significant performance improvements due to the superior threading and
+  caching model, smarter query optimiser
+* allowing the DB to be run on separate hardware
+* allowing basic active/backup high-availability with a "hot spare" synapse
+  pointing at the same DB master, as well as enabling DB replication in
+  synapse itself.
 
-If registration fails, you may need to enable it in the homeserver (see
-`Synapse Installation`_ above)
+For information on how to install and use PostgreSQL, please see
+`docs/postgres.rst <docs/postgres.rst>`_.
 
-Logging In To An Existing Account
----------------------------------
+.. _reverse-proxy:
+
+Using a reverse proxy with Synapse
+==================================
+
+It is recommended to put a reverse proxy such as
+`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
+`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
+doing so is that it means that you can expose the default https port (443) to
+Matrix clients without needing to run Synapse with root privileges.
+
+The most important thing to know here is that Matrix clients and other Matrix
+servers do not necessarily need to connect to your server via the same
+port. Indeed, clients will use port 443 by default, whereas servers default to
+port 8448. Where these are different, we refer to the 'client port' and the
+'federation port'.
+
+The next most important thing to know is that using a reverse-proxy on the
+federation port has a number of pitfalls. It is possible, but be sure to read
+`Reverse-proxying the federation port`_.
+
+The recommended setup is therefore to configure your reverse-proxy on port 443
+to port 8008 of synapse for client connections, but to also directly expose port
+8448 for server-server connections. All the Matrix endpoints begin ``/_matrix``,
+so an example nginx configuration might look like::
+
+    server {
+        listen 443 ssl;
+        listen [::]:443 ssl;
+        server_name matrix.example.com;
+
+        location /_matrix {
+            proxy_pass http://localhost:8008;
+            proxy_set_header X-Forwarded-For $remote_addr;
+        }
+    }
+
+You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
+for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
+recorded correctly.
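For illustration, a sketch of how those two options sit in the port 8008 listener block of ``homeserver.yaml`` (other listener options omitted)::

    listeners:
      - port: 8008
        tls: false
        bind_addresses: ['127.0.0.1']
        x_forwarded: true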
+
+Having done so, you can then use ``https://matrix.example.com`` (instead of
+``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
+Synapse from a client`_.
+
+Reverse-proxying the federation port
+------------------------------------
+
+There are two issues to consider before using a reverse-proxy on the federation
+port:
+
+* Due to the way SSL certificates are managed in the Matrix federation protocol
+  (see `spec`__), Synapse needs to be configured with the path to the SSL
+  certificate, *even if you do not terminate SSL at Synapse*.
+
+  .. __: `key_management`_
+
+* Synapse does not currently support SNI on the federation protocol
+  (`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_), which
+  means that using name-based virtual hosting is unreliable.
+
+Furthermore, a number of the normal reasons for using a reverse-proxy do not
+apply:
+
+* Other servers will connect on port 8448 by default, so there is no need to
+  listen on port 443 (for federation, at least), which avoids the need for root
+  privileges and virtual hosting.
+
+* A self-signed SSL certificate is fine for federation, so there is no need to
+  automate renewals. (The certificate generated by ``--generate-config`` is
+  valid for 10 years.)
+
+If you want to set up a reverse-proxy on the federation port despite these
+caveats, you will need to do the following:
+
+* In ``homeserver.yaml``, set ``tls_certificate_path`` to the path to the SSL
+  certificate file used by your reverse-proxy, and set ``no_tls`` to ``True``.
+  (``tls_private_key_path`` will be ignored if ``no_tls`` is ``True``; a sketch
+  of these two settings follows this list.)
+
+* In your reverse-proxy configuration:
+
+  * If there are other virtual hosts on the same port, make sure that the
+    *default* one uses the certificate configured above.
+
+  * Forward ``/_matrix`` to Synapse.
+
+* If your reverse-proxy is not listening on port 8448, publish a SRV record to
+  tell other servers how to find you. See `Setting up Federation`_.
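As promised above, a minimal sketch of the ``homeserver.yaml`` side of this setup (the certificate path is a hypothetical example)::

    tls_certificate_path: "/etc/ssl/certs/matrix.example.com.crt"
    no_tls: True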
+
+When updating the SSL certificate, just update the file pointed to by
+``tls_certificate_path``: there is no need to restart synapse. (You may like to
+use a symbolic link to help make this process atomic.)
+
+The most common mistake when setting up federation is not to tell Synapse about
+your SSL certificate. To check it, you can visit
+``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``.
+Unfortunately, there is no UI for this yet, but you should see
+``"MatchingTLSFingerprint": true``. If not, check that
+``Certificates[0].SHA256Fingerprint`` (the fingerprint of the certificate
+presented by your reverse-proxy) matches ``Keys.tls_fingerprints[0].sha256``
+(the fingerprint of the certificate Synapse is using).
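A command-line sketch of that check (``example.com`` stands in for your server name, and ``grep`` is just one way to pick the field out of the JSON)::

    $ curl -s "https://matrix.org/federationtester/api/report?server_name=example.com" \
        | grep MatchingTLSFingerprint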
|
|
||||||
Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
|
|
||||||
the form and click the Login button.
|
|
||||||
|
|
Identity Servers
================

Identity servers have the job of mapping email addresses and other 3rd Party
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
before creating that mapping.

**They are not where accounts or credentials are stored - these live on home
servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**

This process is very security-sensitive, as there is obvious risk of spam if it
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
term, we hope to create a decentralised system to manage it (`matrix-doc #712
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
the role of managing trusted identity in the Matrix ecosystem is farmed out to
a cluster of known trusted ecosystem partners, who run 'Matrix Identity
Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
is purely to authenticate and track 3PID logins and publish end-user public
keys.

You can host your own copy of Sydent, but this will prevent you reaching other
users in the Matrix ecosystem via their email address, and prevent them finding
you. We therefore recommend that you use one of the centralised identity
servers at ``https://matrix.org`` or ``https://vector.im`` for now.

To reiterate: the Identity server will only be used if you choose to associate
an email address with your account, or send an invite to another user via
their email address.

URL Previews
============

Synapse 0.15.0 introduces a new API for previewing URLs at
``/_matrix/media/r0/preview_url``. This is disabled by default. To turn it on
you must enable the ``url_preview_enabled: True`` config parameter and
explicitly specify the IP ranges that Synapse is not allowed to spider for
previewing in the ``url_preview_ip_range_blacklist`` configuration parameter.
This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.
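
A minimal sketch of the corresponding ``homeserver.yaml`` settings (the
blacklist below covers loopback plus the RFC1918 ranges; extend it to match
your own network)::

    url_preview_enabled: True
    url_preview_ip_range_blacklist:
      - '127.0.0.0/8'
      - '10.0.0.0/8'
      - '172.16.0.0/12'
      - '192.168.0.0/16'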

This also requires the optional lxml and netaddr python dependencies to be
installed. This in turn requires the libxml2 library to be available - on
Debian/Ubuntu this means ``apt-get install libxml2-dev``, or equivalent for
your OS.

Password reset
==============

If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Vector.

A manual password reset can be done via direct database access as follows.

First calculate the hash of the new password::

    $ source ~/.synapse/bin/activate
    $ ./scripts/hash_password
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the ``users`` table in the database::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';
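
For example, on the default SQLite database, the statement can be applied with
the ``sqlite3`` command-line tool (the database path below assumes the default
``~/.synapse`` install and may differ on your system; the quoted heredoc stops
the shell from expanding the ``$`` characters in the hash)::

    sqlite3 ~/.synapse/homeserver.db <<'EOF'
    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';
    EOF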

Synapse Development
===================

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Installing from source`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::

    virtualenv -p python2.7 env
    source env/bin/activate
    python synapse/python_dependencies.py | xargs pip install
    pip install lxml mock

This will download and install all the needed dependencies into a virtual env.

Once this is done, you may wish to run Synapse's unit tests, to
check that everything is installed as it should be::

    PYTHONPATH="." trial tests

This should end with a 'PASSED' result::

    Ran 143 tests in 0.601s

    PASSED (successes=143)

Running the Integration Tests
=============================

Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly
from the source tree, so installation of the server is not required.

Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.

Building Internal API Documentation
===================================

Building internal API documentation::

    python setup.py build_sphinx

Help!! Synapse eats all my RAM!
===============================

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory-constrained environments, or increased if performance starts to
degrade.
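
As a sketch (the value ``0.25`` here is purely illustrative; tune it for your
own host, and restart synapse for the change to take effect)::

    export SYNAPSE_CACHE_FACTOR=0.25
    synctl restart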

.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys

UPGRADE.rst

Before upgrading, check if any special steps are required to upgrade from
what you currently have installed to the current version of synapse. The extra
instructions that may be required are listed later in this document.

1. If synapse was installed in a virtualenv then activate that virtualenv before
   upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
   run:

   .. code:: bash

       source ~/.synapse/bin/activate

2. If synapse was installed using pip then upgrade to the latest version by
   running:

   .. code:: bash

       pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master

       # restart synapse
       synctl restart

If synapse was installed using git then upgrade to the latest version by
running:

.. code:: bash

    # Pull the latest version of the master branch.
    git pull
    # Update the versions of synapse's python dependencies.
    python synapse/python_dependencies.py | xargs pip install --upgrade

    # restart synapse
    ./synctl restart

To check whether your update was successful, you can check the Server header
returned by the Client-Server API:

.. code:: bash

    # replace <host.name> with the hostname of your synapse homeserver.
    # You may need to specify a port (eg, :8448) if your server is not
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

Upgrading to $NEXT_VERSION
==========================

This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and cpu use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
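
If it is not already present, it can be installed into the same environment
that synapse runs from, e.g.:

.. code:: bash

    pip install psutil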

We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact on the general community.

Upgrading to v0.15.0
====================

contrib/README.rst (new file)

Community Contributions
=======================

Everything in this directory is a project submitted by the community that may
be useful to others. As such, the project maintainers cannot guarantee support,
stability or backwards compatibility of these projects.

Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in future. If you wish to use any of these files then
they should be copied to avoid them breaking from underneath you.

@@ -32,7 +32,7 @@
import urlparse
import nacl.signing
import nacl.encoding

from signedjson.sign import verify_signed_json, SignatureVerifyException

CONFIG_JSON = "cmdclient_config.json"

@@ -36,15 +36,13 @@ class HttpClient(object):
            the request body. This will be encoded as JSON.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body.
        """
        pass

    def get_json(self, url, args=None):
        """ Gets some json from the given host homeserver and path

        Args:
            url (str): The URL to GET data from.

@@ -54,10 +52,8 @@
            and *not* a string.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body.
        """
        pass

contrib/docker/README.md (new file)

# Synapse Docker

This Docker image will run Synapse as a single process. It does not provide a
database server or a TURN server; you should run those separately.

If you run a Postgres server, you should simply have it in the same Compose
project, or set the proper environment variables and the image will
automatically use that server.

## Build

Build the docker image with the `docker build` command from the root of the synapse repository.

```
docker build -t docker.io/matrixdotorg/synapse .
```

The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

You may have a local Python wheel cache available, in which case copy the relevant packages into the ``cache/`` directory at the root of the project.

## Run

This image is designed to run either with an automatically generated
configuration file or with a custom configuration that requires manual editing.

### Automated configuration

It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.

Read the section about environment variables and set at least the mandatory
variables, then run the server:

```
docker-compose up -d
```

### Manual configuration

A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts.

Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:

```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```

Then, customize your configuration and run the server:

```
docker-compose up -d
```

### Without Compose

If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.

```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```

## Volumes

The image expects a single volume, located at ``/data``, that will hold:

* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.

You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on large but
low-performance HDD storage while other files could be stored on high
performance endpoints.

In order to set up an application service, simply create an ``appservices``
directory in the data volume and write the application service YAML
configuration file there. Multiple application services are supported; a
hypothetical registration file is sketched below.
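
For illustration only — the file name, tokens and namespaces below are
placeholders, and the authoritative schema is the matrix.org application
service documentation — a registration file dropped into ``/data/appservices/``
might look like:

```
# /data/appservices/my-bridge.yaml (hypothetical example)
id: my-bridge
url: "http://bridge.internal:9000"   # where the homeserver can reach the appservice
as_token: "<random string>"
hs_token: "<random string>"
sender_localpart: my-bridge
namespaces:
  users:
    - exclusive: true
      regex: "@bridge_.*"
  aliases: []
  rooms: []
```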

## Environment

Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.

Global settings:

* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file

If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
then customize it manually. No other environment variable is required.

Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:

* ``SYNAPSE_SERVER_NAME`` (mandatory), the current server public hostname.
* ``SYNAPSE_REPORT_STATS`` (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project which helps us to get funding.
* ``SYNAPSE_MACAROON_SECRET_KEY`` (mandatory), secret for signing access tokens
  to the server; set this to a proper random key.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
  you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guests to join this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
  uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.

Shared secrets, which will be initialized to random values if not set:

* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
  registration is disabled.

Database specific values (will use SQLite if not set; see the sketch after
this list):

* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db` which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]
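
As a sketch, assuming a Postgres container named `db` on a shared Docker
network called `matrix-net` (both names are placeholders), a plain
`docker run` against it could look like:

```
docker run -d --name synapse \
    --network matrix-net \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    -e POSTGRES_HOST=db \
    -e POSTGRES_USER=matrix \
    -e POSTGRES_PASSWORD=changeme \
    docker.io/matrixdotorg/synapse:latest
```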

Mail server specific values (will not send emails if not set):

* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server, if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server, if any.

contrib/docker/conf/homeserver.yaml (new file)

# vim:ft=yaml

## TLS ##

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

## Server ##

server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0

## Ports ##

listeners:
  {% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]  # Federation APIs
        compress: false
  {% endif %}

  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false

    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false

## Database ##

{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}

## Performance ##

event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"

## Ratelimiting ##

rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3

## Files ##

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "10M"
max_image_pixels: "32M"
dynamic_thumbnails: false

# List of thumbnails to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
  height: 32
  method: crop
- width: 96
  height: 96
  method: crop
- width: 320
  height: 240
  method: scale
- width: 640
  height: 480
  method: scale
- width: 800
  height: 600
  method: scale

url_preview_enabled: False
max_spider_size: "10M"

## Captcha ##

{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}

## Turn ##

{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %} - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}

## Registration ##

enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
  - matrix.org
  - vector.im
  - riot.im

## Metrics ##

{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}

## API Configuration ##

room_invite_state_types:
  - "m.room.join_rules"
  - "m.room.canonical_alias"
  - "m.room.avatar"
  - "m.room.name"

{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}

macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False

## Signing Keys ##

signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d"  # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

password_config:
  enabled: true

{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}

contrib/docker/conf/log.config (new file)

version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
  handlers: [console]

contrib/docker/docker-compose.yml (new file)

# This compose file is compatible with Compose itself; it might need some
# adjustments to run properly with stack.

version: '3'

services:

  synapse:
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry connecting to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for full documentation of the environment settings
    environment:
      - SYNAPSE_SERVER_NAME=my.matrix.host
      - SYNAPSE_REPORT_STATS=no
      - SYNAPSE_ENABLE_REGISTRATION=yes
      - SYNAPSE_LOG_LEVEL=INFO
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following; you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy; here is an example for traefik:
    labels:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.host
      - traefik.port=8448

  db:
    image: docker.io/postgres:10-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data

contrib/docker/start.py (new executable file)

#!/usr/local/bin/python

import jinja2
import os
import sys
import subprocess
import glob

# Utility function: render a Jinja2 template from src to dst with the given
# environment as the template context.
def convert(src, dst, environ):
    with open(src) as infile, open(dst, "w") as outfile:
        outfile.write(jinja2.Template(infile.read()).render(**environ))

def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)

def generate_secrets(environ, secrets):
    # Reuse a previously generated secret from /data if present, otherwise
    # generate a fresh random value and persist it for next time.
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle:
                    value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = os.urandom(32).encode("hex")
                with open(filename, "w") as handle:
                    handle.write(value)
            environ[secret] = value

# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Use the custom configuration file if one was given, otherwise compile
    # one from the templates and the environment.
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"):
            os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]
    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)

contrib/example_log_config.yaml (new file)

# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.

version: 1

formatters:
  fmt:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  # example output to console
  console:
    class: logging.StreamHandler
    filters: [context]

  # example output to file - to enable, edit 'root' config below.
  file:
    class: logging.handlers.RotatingFileHandler
    formatter: fmt
    filename: /var/log/synapse/homeserver.log
    maxBytes: 100000000
    backupCount: 3
    filters: [context]


root:
  level: INFO
  handlers: [console]  # to use file handler instead, switch to [file]

loggers:
  synapse:
    level: INFO

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: INFO

  # example of enabling debugging for a component:
  #
  # synapse.federation.transport.server:
  #   level: DEBUG

@@ -22,6 +22,8 @@ import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze

from six import string_types


def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"

@@ -58,7 +60,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
    for key, value in unfreeze(event.get_dict()["content"]).items():
        if value is None:
            value = "<null>"
        elif isinstance(value, string_types):
            pass
        else:
            value = json.dumps(value)

contrib/prometheus/README (new file)

This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.

To use it, first install prometheus by following the instructions at

    http://prometheus.io/

### for Prometheus v1

Add a new job to the main prometheus.conf file:

    job: {
      name: "synapse"

      target_group: {
        target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
      }
    }

### for Prometheus v2

Add a new job to the main prometheus.yml file:

    - job_name: "synapse"
      metrics_path: "/_synapse/metrics"
      # when endpoint uses https:
      scheme: "https"

      static_configs:
        - targets: ['SERVER.LOCATION:PORT']

To use `synapse.rules` add

    rule_files:
      - "/PATH/TO/synapse-v2.rules"

Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option. An example of the config-file form follows below.
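
For instance, the config-file form is simply the following line in
`homeserver.yaml` (the same setting used by the Docker `homeserver.yaml`
template in this repository):

    enable_metrics: True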

contrib/prometheus/consoles/synapse.html (new file)

{{ template "head" . }}

{{ template "prom_content_head" . }}
<h1>System Resources</h1>

<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_utime"),
  expr: "rate(process_cpu_seconds_total[2m]) * 100",
  name: "[[job]]",
  min: 0,
  max: 100,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "%",
  yTitle: "CPU Usage"
})
</script>

<h3>Memory</h3>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_maxrss"),
  expr: "process_psutil_rss:max",
  name: "Maxrss",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "bytes",
  yTitle: "Usage"
})
</script>

<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_fds"),
  expr: "process_open_fds{job='synapse'}",
  name: "FDs",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "",
  yTitle: "Descriptors"
})
</script>

<h1>Reactor</h1>

<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_total_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
  name: "time",
  max: 1,
  min: 0,
  renderer: "area",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_average_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
  name: "time",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s",
  yTitle: "Time"
})
</script>

<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_pending_calls"),
  expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
  name: "calls",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yTitle: "Pending Calls"
})
</script>

<h1>Storage</h1>

<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_query_time"),
  expr: "rate(synapse_storage_query_time:count[2m])",
  name: "[[verb]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "queries/s",
  yTitle: "Queries"
})
</script>

<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transaction_time"),
  expr: "rate(synapse_storage_transaction_time:count[2m])",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "txn/s",
  yTitle: "Transactions"
})
</script>

<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transactions_time_msec"),
  expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_schedule_time"),
  expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
  name: "Total latency",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_ratio"),
  expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
  name: "[[name]]",
  min: 0,
  max: 100,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "%",
  yTitle: "Percentage"
})
</script>

<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_size"),
  expr: "synapse_util_caches_cache:size",
  name: "[[name]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Items"
})
</script>

<h1>Requests</h1>

<h3>Requests by Servlet</h3>
<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet"),
  expr: "rate(synapse_http_server_request_count:servlet[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
  expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_time_avg"),
  expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses"),
  expr: "rate(synapse_http_server_responses[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses_err"),
  expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>


<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_ru_utime"),
  expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "CPU Usage"
})
</script>


<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
  expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "DB Usage"
})
</script>


<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_send_time_avg"),
  expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h1>Federation</h1>

<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_client_sent"),
  expr: "rate(synapse_federation_client_sent[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_server_received"),
  expr: "rate(synapse_federation_server_received[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_transaction_queue_pending"),
  expr: "synapse_federation_transaction_queue_pending",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Units"
})
</script>

<h1>Clients</h1>

<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_listeners"),
  expr: "synapse_notifier_listeners",
  name: "listeners",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Listeners"
})
</script>

<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_notified_events"),
  expr: "rate(synapse_notifier_notified_events[2m])",
  name: "events",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "events/s",
  yTitle: "Event rate"
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}

contrib/prometheus/synapse-v1.rules (new file)

synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0

contrib/prometheus/synapse-v2.rules (new file, +60 lines)
@@ -0,0 +1,60 @@
groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_request_count:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_request_count) by (method)"
  - record: 'synapse_http_server_request_count:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_request_count) by (servlet)'

  - record: 'synapse_http_server_request_count:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_client_sent_queries) by (job)'

  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_server_received_queries) by (job)'

  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "EDU"
    expr: 'synapse_federation_transaction_queue_pending_edus + 0'
  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
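Once Prometheus has loaded either rules file, the recorded series can be read back like any other metric. The following is a minimal sketch, not part of the rules above: the Prometheus address is an assumption, and it uses the standard ``/api/v1/query`` HTTP endpoint to evaluate the cache hit-ratio rule:

```python
import requests

# Hypothetical Prometheus address; adjust for your deployment.
PROMETHEUS = "http://localhost:9090"

def query(expr):
    """Evaluate a PromQL expression via the /api/v1/query endpoint."""
    resp = requests.get(PROMETHEUS + "/api/v1/query", params={"query": expr})
    resp.raise_for_status()
    return resp.json()["data"]["result"]

# The recording rules above expose the cache hit ratio under a stable name.
for sample in query("synapse_cache:hit_ratio_5m"):
    print(sample["metric"], sample["value"])
```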
@@ -1,7 +1,10 @@
 # This assumes that Synapse has been installed as a system package
-# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
+# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
 # rather than in a user home directory or similar under virtualenv.
+
+# **NOTE:** This is an example service file that may change in the future. If you
+# wish to use this please copy rather than symlink it.
+
 [Unit]
 Description=Synapse Matrix homeserver
 
@@ -9,9 +12,11 @@ Description=Synapse Matrix homeserver
 Type=simple
 User=synapse
 Group=synapse
-EnvironmentFile=-/etc/sysconfig/synapse
 WorkingDirectory=/var/lib/synapse
-ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
+ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
+ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
+# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR
 
 [Install]
 WantedBy=multi-user.target
@@ -11,12 +11,12 @@ https://developers.google.com/recaptcha/
 Setting ReCaptcha Keys
 ----------------------
 The keys are a config option on the home server config. If they are not
-visible, you can generate them via --generate-config. Set the following value:
+visible, you can generate them via --generate-config. Set the following value::
 
   recaptcha_public_key: YOUR_PUBLIC_KEY
   recaptcha_private_key: YOUR_PRIVATE_KEY
 
-In addition, you MUST enable captchas via:
+In addition, you MUST enable captchas via::
 
   enable_registration_captcha: true
 
@@ -25,7 +25,5 @@ Configuring IP used for auth
 The ReCaptcha API requires that the IP address of the user who solved the
 captcha is sent. If the client is connecting through a proxy or load balancer,
 it may be required to use the X-Forwarded-For (XFF) header instead of the origin
-IP address. This can be configured as an option on the home server like so:
-
-  captcha_ip_origin_is_x_forwarded: true
+IP address. This can be configured using the x_forwarded directive in the
+listeners section of the homeserver.yaml configuration file.
docs/admin_api/media_admin_api.md (new file, +23 lines)
@@ -0,0 +1,23 @@
# List all media in a room

This API gets a list of known media in a room.

The API is:
```
GET /_matrix/client/r0/admin/room/<room_id>/media
```
including an `access_token` of a server admin.

It returns a JSON body like the following:
```
{
  "local": [
    "mxc://localhost/xwvutsrqponmlkjihgfedcba",
    "mxc://localhost/abcdefghijklmnopqrstuvwx"
  ],
  "remote": [
    "mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
    "mxc://matrix.org/abcdefghijklmnopqrstuvwx"
  ]
}
```
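As a rough illustration of calling this endpoint, here is a sketch in Python. The homeserver URL, token, and room ID are placeholders, not values defined by the API above:

```python
import requests

# Placeholder values; substitute your homeserver and an admin's access token.
HOMESERVER = "https://matrix.example.com"
ADMIN_TOKEN = "MDAx..."
ROOM_ID = "!abcdefg:example.com"  # may need URL-encoding in practice

resp = requests.get(
    HOMESERVER + "/_matrix/client/r0/admin/room/" + ROOM_ID + "/media",
    params={"access_token": ADMIN_TOKEN},
)
resp.raise_for_status()
media = resp.json()
print(len(media["local"]), "local and", len(media["remote"]), "remote media items")
```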
@@ -8,8 +8,56 @@ Depending on the amount of history being purged a call to the API may take
 several minutes or longer. During this period users will not be able to
 paginate further back in the room from the point being purged from.
 
-The API is simply:
+The API is:
 
-``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
+``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``
 
 including an ``access_token`` of a server admin.
 
+By default, events sent by local users are not deleted, as they may represent
+the only copies of this content in existence. (Events sent by remote users are
+deleted.)
+
+Room state data (such as joins, leaves, topic) is always preserved.
+
+To delete local message events as well, set ``delete_local_events`` in the body:
+
+.. code:: json
+
+   {
+      "delete_local_events": true
+   }
+
+The caller must specify the point in the room to purge up to. This can be
+specified by including an event_id in the URI, or by setting a
+``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
+id is given, that event (and others at the same graph depth) will be retained.
+If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
+in milliseconds.
+
+The API starts the purge running, and returns immediately with a JSON body with
+a purge id:
+
+.. code:: json
+
+   {
+       "purge_id": "<opaque id>"
+   }
+
+Purge status query
+------------------
+
+It is possible to poll for updates on recent purges with a second API;
+
+``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``
+
+(again, with a suitable ``access_token``). This API returns a JSON body like
+the following:
+
+.. code:: json
+
+   {
+       "status": "active"
+   }
+
+The status will be one of ``active``, ``complete``, or ``failed``.
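A minimal sketch of driving this API end to end (start a purge, then poll the status endpoint) follows. The homeserver, token, room ID, and timestamp are placeholders:

.. code:: python

    import time

    import requests

    # Placeholder values; substitute your homeserver and an admin's token.
    HOMESERVER = "https://matrix.example.com"
    ADMIN_TOKEN = "MDAx..."
    ROOM_ID = "!abcdefg:example.com"  # may need URL-encoding in practice

    # Start a purge of everything up to a timestamp (ms since the unix
    # epoch), including events sent by local users.
    resp = requests.post(
        HOMESERVER + "/_matrix/client/r0/admin/purge_history/" + ROOM_ID,
        params={"access_token": ADMIN_TOKEN},
        json={"delete_local_events": True, "purge_up_to_ts": 1514764800000},
    )
    resp.raise_for_status()
    purge_id = resp.json()["purge_id"]

    # Poll until the purge is no longer active.
    while True:
        status = requests.get(
            HOMESERVER + "/_matrix/client/r0/admin/purge_history_status/" + purge_id,
            params={"access_token": ADMIN_TOKEN},
        ).json()["status"]
        if status != "active":
            break
        time.sleep(5)
    print("purge finished with status:", status)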
@@ -6,11 +6,9 @@ media.
 
 The API is::
 
-    POST /_matrix/client/r0/admin/purge_media_cache
+    POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
 
-    {
-        "before_ts": <unix_timestamp_in_ms>
-    }
+    {}
 
 Which will remove all cached media that was last accessed before
 ``<unix_timestamp_in_ms>``.
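For illustration, the same request made from Python, purging media not accessed in the last 30 days. The homeserver and token are placeholders:

.. code:: python

    import time

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "MDAx..."                    # placeholder admin token

    # Everything last accessed more than 30 days ago.
    before_ts = int(time.time() * 1000) - 30 * 24 * 60 * 60 * 1000

    resp = requests.post(
        HOMESERVER + "/_matrix/client/r0/admin/purge_media_cache",
        params={"access_token": ADMIN_TOKEN, "before_ts": before_ts},
        json={},  # the body is an empty JSON object, as above
    )
    resp.raise_for_status()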
docs/admin_api/user_admin_api.rst (new file, +73 lines)
@@ -0,0 +1,73 @@
Query Account
=============

This API returns information about a specific user account.

The api is::

    GET /_matrix/client/r0/admin/whois/<user_id>

including an ``access_token`` of a server admin.

It returns a JSON body like the following:

.. code:: json

    {
        "user_id": "<user_id>",
        "devices": {
            "": {
                "sessions": [
                    {
                        "connections": [
                            {
                                "ip": "1.2.3.4",
                                "last_seen": 1417222374433,
                                "user_agent": "Mozilla/5.0 ..."
                            },
                            {
                                "ip": "1.2.3.10",
                                "last_seen": 1417222374500,
                                "user_agent": "Dalvik/2.1.0 ..."
                            }
                        ]
                    }
                ]
            }
        }
    }

``last_seen`` is measured in milliseconds since the Unix epoch.

Deactivate Account
==================

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).

The api is::

    POST /_matrix/client/r0/admin/deactivate/<user_id>

including an ``access_token`` of a server admin, and an empty request body.


Reset password
==============

Changes the password of another user.

The api is::

    POST /_matrix/client/r0/admin/reset_password/<user_id>

with a body of:

.. code:: json

   {
       "new_password": "<secret>"
   }

including an ``access_token`` of a server admin.
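A sketch of the reset-password call, with placeholder homeserver, token, and user:

.. code:: python

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "MDAx..."                    # placeholder admin token
    USER_ID = "@alice:example.com"

    resp = requests.post(
        HOMESERVER + "/_matrix/client/r0/admin/reset_password/" + USER_ID,
        params={"access_token": ADMIN_TOKEN},
        json={"new_password": "correct horse battery staple"},
    )
    resp.raise_for_status()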
@@ -1,26 +1,13 @@
-Basically, PEP8
+- Everything should comply with PEP8. Code should pass
+  ``pep8 --max-line-length=100`` without any warnings.
+
+- **Indenting**:
 
 - NEVER tabs. 4 spaces to indent.
-- Max line width: 79 chars (with flexibility to overflow by a "few chars" if
-  the overflowing content is not semantically significant and avoids an
-  explosion of vertical whitespace).
-- Use camel case for class and type names
-- Use underscores for functions and variables.
-- Use double quotes.
-- Use parentheses instead of '\\' for line continuation where ever possible
-  (which is pretty much everywhere)
-- There should be max a single new line between:
-  - statements
-  - functions in a class
-- There should be two new lines between:
-  - definitions in a module (e.g., between different classes)
-- There should be spaces where spaces should be and not where there shouldn't be:
-  - a single space after a comma
-  - a single space before and after for '=' when used as assignment
-  - no spaces before and after for '=' for default values and keyword arguments.
-- Indenting must follow PEP8; either hanging indent or multiline-visual indent
-  depending on the size and shape of the arguments and what makes more sense to
-  the author. In other words, both this::
+  - follow PEP8; either hanging indent or multiline-visual indent depending
+    on the size and shape of the arguments and what makes more sense to the
+    author. In other words, both this::
 
     print("I am a fish %s" % "moo")
 
@@ -33,20 +20,100 @@ Basically, PEP8
 
     print(
         "I am a fish %s" %
-        "moo"
+        "moo",
    )
 
 ...are valid, although given each one takes up 2x more vertical space than
-the previous, it's up to the author's discretion as to which layout makes most
-sense for their function invocation. (e.g. if they want to add comments
-per-argument, or put expressions in the arguments, or group related arguments
-together, or want to deliberately extend or preserve vertical/horizontal
-space)
+the previous, it's up to the author's discretion as to which layout makes
+most sense for their function invocation. (e.g. if they want to add
+comments per-argument, or put expressions in the arguments, or group
+related arguments together, or want to deliberately extend or preserve
+vertical/horizontal space)
 
-Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
-This is so that we can generate documentation with
-`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
-`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
+- **Line length**:
+
+  Max line length is 79 chars (with flexibility to overflow by a "few chars" if
+  the overflowing content is not semantically significant and avoids an
+  explosion of vertical whitespace).
+
+  Use parentheses instead of ``\`` for line continuation wherever possible
+  (which is pretty much everywhere).
+
+- **Naming**:
+
+  - Use camel case for class and type names
+  - Use underscores for functions and variables.
+
+- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.
+
+- **Blank lines**:
+
+  - There should be max a single new line between:
+
+    - statements
+    - functions in a class
+
+  - There should be two new lines between:
+
+    - definitions in a module (e.g., between different classes)
+
+- **Whitespace**:
+
+  There should be spaces where spaces should be and not where there shouldn't
+  be:
+
+  - a single space after a comma
+  - a single space before and after for '=' when used as assignment
+  - no spaces before and after for '=' for default values and keyword arguments.
+
+- **Comments**: should follow the `google code style
+  <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
+  This is so that we can generate documentation with `sphinx
+  <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
+  `examples
+  <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
 in the sphinx documentation.
 
-Code should pass pep8 --max-line-length=100 without any warnings.
+- **Imports**:
+
+  - Prefer to import classes and functions rather than packages or modules.
+
+    Example::
+
+        from synapse.types import UserID
+        ...
+        user_id = UserID(local, server)
+
+    is preferred over::
+
+        from synapse import types
+        ...
+        user_id = types.UserID(local, server)
+
+    (or any other variant).
+
+    This goes against the advice in the Google style guide, but it means that
+    errors in the name are caught early (at import time).
+
+  - Multiple imports from the same package can be combined onto one line::
+
+        from synapse.types import GroupID, RoomID, UserID
+
+    An effort should be made to keep the individual imports in alphabetical
+    order.
+
+    If the list becomes long, wrap it with parentheses and split it over
+    multiple lines.
+
+  - As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
+    imports should be grouped in the following order, with a blank line between
+    each group:
+
+    1. standard library imports
+    2. related third party imports
+    3. local application/library specific imports
+
+  - Imports within each group should be sorted alphabetically by module name.
+
+  - Avoid wildcard imports (``from synapse.types import *``) and relative
+    imports (``from .types import UserID``).
@@ -1,10 +1,442 @@
-What do I do about "Unexpected logging context" debug log-lines everywhere?
-
-<Mjark> The logging context lives in thread local storage
-<Mjark> Sometimes it gets out of sync with what it should actually be, usually because something scheduled something to run on the reactor without preserving the logging context.
-<Matthew> what is the impact of it getting out of sync? and how and when should we preserve log context?
-<Mjark> The impact is that some of the CPU and database metrics will be under-reported, and some log lines will be mis-attributed.
-<Mjark> It should happen auto-magically in all the APIs that do IO or otherwise defer to the reactor.
-<Erik> Mjark: the other place is if we branch, e.g. using defer.gatherResults
-
-Unanswered: how and when should we preserve log context?

Log contexts
============

.. contents::

To help track the processing of individual requests, synapse uses a
'log context' to track which request it is handling at any given moment. This
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
the information back out of the thread-local variable and add it to each log
record.

Logcontexts are also used for CPU and database accounting, so that we can track
which requests were responsible for high CPU use or database activity.

The ``synapse.util.logcontext`` module provides facilities for managing the
current log context (as well as providing the ``LoggingContextFilter`` class).
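For orientation, here is a hand-wired sketch of how such a filter gets the request id onto log records. Synapse's own logging configuration normally does this for you, and the exact ``LoggingContextFilter`` constructor arguments are an assumption here:

.. code:: python

    import logging

    from synapse.util.logcontext import LoggingContextFilter

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(name)s - %(request)s - %(message)s"
    ))
    # The filter copies the current logcontext's request id onto each
    # record, so the %(request)s placeholder above can be formatted; the
    # keyword argument supplies a default for records logged outside any
    # context.
    handler.addFilter(LoggingContextFilter(request=""))
    logging.getLogger().addHandler(handler)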
Deferreds make the whole thing complicated, so this document describes how it
all works, and how to write code which follows the rules.

Logcontexts without Deferreds
-----------------------------

In the absence of any Deferred voodoo, things are simple enough. As with any
code of this nature, the rule is that our function should leave things as it
found them:

.. code:: python

    from synapse.util import logcontext         # omitted from future snippets

    def handle_request(request_id):
        request_context = logcontext.LoggingContext()

        calling_context = logcontext.LoggingContext.current_context()
        logcontext.LoggingContext.set_current_context(request_context)
        try:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")
        finally:
            logcontext.LoggingContext.set_current_context(calling_context)

    def do_request_handling():
        logger.debug("phew")  # this will be logged against request_id

LoggingContext implements the context management methods, so the above can be
written much more succinctly as:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")

    def do_request_handling():
        logger.debug("phew")

Using logcontexts with Deferreds
--------------------------------

Deferreds — and in particular, ``defer.inlineCallbacks`` — break
the linear flow of code so that there is no longer a single entry point where
we should set the logcontext and a single exit point where we should remove it.

Consider the example above, where ``do_request_handling`` needs to do some
blocking operation, and returns a deferred:

.. code:: python

    @defer.inlineCallbacks
    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            yield do_request_handling()
            logger.debug("finished")

In the above flow:

* The logcontext is set
* ``do_request_handling`` is called, and returns a deferred
* ``handle_request`` yields the deferred
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred

So we have stopped processing the request (and will probably go on to start
processing the next), without clearing the logcontext.

To circumvent this problem, synapse code assumes that, wherever you have a
deferred, you will want to yield on it. To that end, wherever functions return
a deferred, we adopt the following conventions:

**Rules for functions returning deferreds:**

* If the deferred is already complete, the function returns with the same
  logcontext it started with.
* If the deferred is incomplete, the function clears the logcontext before
  returning; when the deferred completes, it restores the logcontext before
  running any callbacks.

That sounds complicated, but actually it means a lot of code (including the
example above) "just works". There are two cases:

* If ``do_request_handling`` returns a completed deferred, then the logcontext
  will still be in place. In this case, execution will continue immediately
  after the ``yield``; the "finished" line will be logged against the right
  context, and the ``with`` block restores the original context before we
  return to the caller.

* If the returned deferred is incomplete, ``do_request_handling`` clears the
  logcontext before returning. The logcontext is therefore clear when
  ``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
  wrapper adds a callback to the deferred, and returns another (incomplete)
  deferred to the caller, and it is safe to begin processing the next request.

  Once ``do_request_handling``'s deferred completes, it will reinstate the
  logcontext, before running the callback added by the ``inlineCallbacks``
  wrapper. That callback runs the second half of ``handle_request``, so again
  the "finished" line will be logged against the right context, and the
  ``with`` block restores the original context.

As an aside, it's worth noting that ``handle_request`` follows our rules -
though that only matters if the caller has its own logcontext which it cares
about.

The following sections describe pitfalls and helpful patterns when implementing
these rules.

Always yield your deferreds
---------------------------

Whenever you get a deferred back from a function, you should ``yield`` on it
as soon as possible. (Returning it directly to your caller is ok too, if you're
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
call any other functions.

.. code:: python

    @defer.inlineCallbacks
    def fun():
        logger.debug("starting")
        yield do_some_stuff()       # just like this

        d = more_stuff()
        result = yield d            # also fine, of course

        defer.returnValue(result)

    def nonInlineCallbacksFun():
        logger.debug("just a wrapper really")
        return do_some_stuff()      # this is ok too - the caller will yield
                                    # on it anyway.

Provided this pattern is followed all the way back up the callchain to where
the logcontext was set, this will make things work out ok: provided
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.

It's all too easy to forget to ``yield``: for instance if we forgot that
``do_some_stuff`` returned a deferred, we might plough on regardless. This
leads to a mess; it will probably work itself out eventually, but not before
a load of stuff has been logged against the wrong context. (Normally, other
things will break, more obviously, if you forget to ``yield``, so this tends
not to be a major problem in practice.)

Of course sometimes you need to do something a bit fancier with your Deferreds
- not all code follows the linear A-then-B-then-C pattern. Notes on
implementing more complex patterns are in later sections.

Where you create a new Deferred, make it follow the rules
---------------------------------------------------------

Most of the time, a Deferred comes from another synapse function. Sometimes,
though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.

The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:

.. code:: python

    # not a logcontext-rules-compliant function
    def get_sleep_deferred(seconds):
        d = defer.Deferred()
        reactor.callLater(seconds, d.callback, None)
        return d

That doesn't follow the rules, but we can fix it by wrapping it with
``PreserveLoggingContext`` and ``yield``-ing on it:

.. code:: python

    @defer.inlineCallbacks
    def sleep(seconds):
        with PreserveLoggingContext():
            yield get_sleep_deferred(seconds)

This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.

You can also use ``logcontext.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:

.. code:: python

    def sleep(seconds):
        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))

Fire-and-forget
---------------

Sometimes you want to fire off a chain of execution, but not wait for its
result. That might look a bit like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # *don't* do this
        background_operation()

        logger.debug("Request handling complete")

    @defer.inlineCallbacks
    def background_operation():
        yield first_background_step()
        logger.debug("Completed first step")
        yield second_background_step()
        logger.debug("Completed second step")

The above code does a couple of steps in the background after
``do_request_handling`` has finished. The log lines are still logged against
the ``request_context`` logcontext, which may or may not be desirable. There
are two big problems with the above, however. The first problem is that, if
``background_operation`` returns an incomplete Deferred, it will expect its
caller to ``yield`` immediately, so will have cleared the logcontext. In this
example, that means that 'Request handling complete' will be logged without any
context.

The second problem, which is potentially even worse, is that when the Deferred
returned by ``background_operation`` completes, it will restore the original
logcontext. There is nothing waiting on that Deferred, so the logcontext will
leak into the reactor and possibly get attached to some arbitrary future
operation.

There are two potential solutions to this.

One option is to surround the call to ``background_operation`` with a
``PreserveLoggingContext`` call. That will reset the logcontext before
starting ``background_operation`` (so the context restored when the deferred
completes will be the empty logcontext), and will restore the current
logcontext before continuing the foreground process:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # start background_operation off in the empty logcontext, to
        # avoid leaking the current context into the reactor.
        with PreserveLoggingContext():
            background_operation()

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Obviously that option means that the operations done in
``background_operation`` would not be logged against a logcontext (though
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).

The second option is to use ``logcontext.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function — the opposite operation to that described in the previous section.
It can be used like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        logcontext.run_in_background(background_operation)

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Passing synapse deferreds into third-party functions
----------------------------------------------------

A typical example of this is where we want to collect together two or more
deferreds via ``defer.gatherResults``:

.. code:: python

    d1 = operation1()
    d2 = operation2()
    d3 = defer.gatherResults([d1, d2])

This is really a variation of the fire-and-forget problem above, in that we are
firing off ``d1`` and ``d2`` without yielding on them. The difference
is that we now have third-party code attached to their callbacks. Either way,
either technique given in the `Fire-and-forget`_ section will work.

Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
in order to make it follow the logcontext rules before we can yield it, as
described in `Where you create a new Deferred, make it follow the rules`_.

So, option one: reset the logcontext before starting the operations to be
gathered:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        with PreserveLoggingContext():
            d1 = operation1()
            d2 = operation2()
            result = yield defer.gatherResults([d1, d2])

In this case particularly, though, option two, of using
``logcontext.preserve_fn`` almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        d1 = logcontext.preserve_fn(operation1)()
        d2 = logcontext.preserve_fn(operation2)()

        with PreserveLoggingContext():
            result = yield defer.gatherResults([d1, d2])

Was all this really necessary?
------------------------------

The conventions used work fine for a linear flow where everything happens in
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
follow for any more exotic flows. It's hard not to wonder if we could have done
something else.

We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an alternative
approach.

I briefly prototyped some code following an alternative set of rules. I think
it would work, but I certainly didn't get as far as thinking how it would
interact with concepts as complicated as the cache descriptors.

My alternative rules were:

* functions always preserve the logcontext of their caller, whether or not they
  are returning a Deferred.

* Deferreds returned by synapse functions run their callbacks in the same
  context as the function was originally called in.

The main point of this scheme is that everywhere that sets the logcontext is
responsible for clearing it before returning control to the reactor.

So, for example, if you were the function which started a ``with
LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
off the background process, and then leave the ``with`` block to wait for it:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            d = do_request_handling()

        def cb(r):
            logger.debug("finished")

        d.addCallback(cb)
        return d

(in general, mixing ``with LoggingContext`` blocks and
``defer.inlineCallbacks`` in the same function leads to slightly
counter-intuitive code, under this scheme).

Because we leave the original ``with`` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today), the
logcontext is cleared before control passes back to the reactor; so if there is
some code within ``do_request_handling`` which needs to wait for a Deferred to
complete, there is no need for it to worry about clearing the logcontext before
doing so:

.. code:: python

    def handle_request():
        r = do_some_stuff()
        r.addCallback(do_some_more_stuff)
        return r

— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
runs its callbacks in the original logcontext, all is happy.

The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve — we have it today, in the shape of
``logcontext._PreservingContextDeferred``:

.. code:: python

    def do_some_stuff():
        deferred = do_some_io()
        pcd = _PreservingContextDeferred(LoggingContext.current_context())
        deferred.chainDeferred(pcd)
        return pcd

It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
on has that property. So we can just write:

.. code:: python

    @defer.inlineCallbacks
    def handle_request():
        yield do_some_stuff()
        yield do_some_more_stuff()

To conclude: I think this scheme would have worked equally well, with less
danger of messing it up, and probably made some more esoteric code easier to
write. But again — changing the conventions of the entire Synapse codebase is
not a sensible option for the marginal improvement offered.
@@ -1,28 +1,84 @@
 How to monitor Synapse metrics using Prometheus
 ===============================================
 
-1: Install prometheus:
+1. Install prometheus:
 
    Follow instructions at http://prometheus.io/docs/introduction/install/
 
-2: Enable synapse metrics:
+2. Enable synapse metrics:
 
    Simply setting a (local) port number will enable it. Pick a port.
    prometheus itself defaults to 9090, so starting just above that for
    locally monitored services seems reasonable. E.g. 9092:
 
-   Add to homeserver.yaml
+   Add to homeserver.yaml::
 
      metrics_port: 9092
 
-   Restart synapse
+   Also ensure that ``enable_metrics`` is set to ``True``.
 
-3: Add a prometheus target for synapse. It needs to set the ``metrics_path``
-   to a non-default value::
+   Restart synapse.
+
+3. Add a prometheus target for synapse.
+
+   It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::
 
     - job_name: "synapse"
       metrics_path: "/_synapse/metrics"
       static_configs:
-        - targets:
-            "my.server.here:9092"
+        - targets: ["my.server.here:9092"]
+
+   If your prometheus is older than 1.5.2, you will need to replace
+   ``static_configs`` in the above with ``target_groups``.
+
+   Restart prometheus.
+
+
+Block and response metrics renamed for 0.27.0
+---------------------------------------------
+
+Synapse 0.27.0 begins the process of rationalising the duplicate ``*:count``
+metrics reported for the resource tracking for code blocks and HTTP requests.
+
+At the same time, the corresponding ``*:total`` metrics are being renamed, as
+the ``:total`` suffix no longer makes sense in the absence of a corresponding
+``:count`` metric.
+
+To enable a graceful migration path, this release just adds new names for the
+metrics being renamed. A future release will remove the old ones.
+
+The following table shows the new metrics, and the old metrics which they are
+replacing.
+
+==================================================== ===================================================
+New name                                             Old name
+==================================================== ===================================================
+synapse_util_metrics_block_count                     synapse_util_metrics_block_timer:count
+synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_utime:count
+synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_stime:count
+synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_count:count
+synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_duration:count
+
+synapse_util_metrics_block_time_seconds              synapse_util_metrics_block_timer:total
+synapse_util_metrics_block_ru_utime_seconds          synapse_util_metrics_block_ru_utime:total
+synapse_util_metrics_block_ru_stime_seconds          synapse_util_metrics_block_ru_stime:total
+synapse_util_metrics_block_db_txn_count              synapse_util_metrics_block_db_txn_count:total
+synapse_util_metrics_block_db_txn_duration_seconds   synapse_util_metrics_block_db_txn_duration:total
+
+synapse_http_server_response_count                   synapse_http_server_requests
+synapse_http_server_response_count                   synapse_http_server_response_time:count
+synapse_http_server_response_count                   synapse_http_server_response_ru_utime:count
+synapse_http_server_response_count                   synapse_http_server_response_ru_stime:count
+synapse_http_server_response_count                   synapse_http_server_response_db_txn_count:count
+synapse_http_server_response_count                   synapse_http_server_response_db_txn_duration:count
+
+synapse_http_server_response_time_seconds            synapse_http_server_response_time:total
+synapse_http_server_response_ru_utime_seconds        synapse_http_server_response_ru_utime:total
+synapse_http_server_response_ru_stime_seconds        synapse_http_server_response_ru_stime:total
+synapse_http_server_response_db_txn_count            synapse_http_server_response_db_txn_count:total
+synapse_http_server_response_db_txn_duration_seconds synapse_http_server_response_db_txn_duration:total
+==================================================== ===================================================
 
 Standard Metric Names
 ---------------------
@@ -33,7 +89,7 @@ have been changed to seconds, from miliseconds.
 
 ================================== =============================
 New name                           Old name
----------------------------------- -----------------------------
+================================== =============================
 process_cpu_user_seconds_total     process_resource_utime / 1000
 process_cpu_system_seconds_total   process_resource_stime / 1000
 process_open_fds (no 'type' label) process_fds
@@ -43,7 +99,7 @@ The python-specific counts of garbage collector performance have been renamed.
 
 =========================== ======================
 New name                    Old name
---------------------------- ----------------------
+=========================== ======================
 python_gc_time              reactor_gc_time
 python_gc_unreachable_total reactor_gc_unreachable
 python_gc_counts            reactor_gc_counts
@@ -51,9 +107,9 @@ python_gc_counts reactor_gc_counts
 
 The twisted-specific reactor metrics have been renamed.
 
-==================================== =================
+==================================== =====================
 New name                             Old name
-------------------------------------- -----------------
+==================================== =====================
-python_twisted_reactor_pending_calls reactor_tick_time
+python_twisted_reactor_pending_calls reactor_pending_calls
 python_twisted_reactor_tick_time     reactor_tick_time
-==================================== =================
+==================================== =====================
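As a quick sanity check that steps 2 and 3 are wired up, the metrics endpoint can be fetched directly. A sketch (the host and port follow the example configuration above, so adjust to taste):

.. code:: python

    import requests

    resp = requests.get("http://my.server.here:9092/_synapse/metrics")
    resp.raise_for_status()

    # The body is Prometheus's plain-text exposition format, one sample
    # per line.
    for line in resp.text.splitlines():
        if line.startswith("synapse_http_server_response_count"):
            print(line)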
docs/password_auth_providers.rst (new file, +99 lines)
@@ -0,0 +1,99 @@
Password auth provider modules
==============================

Password auth providers offer a way for server administrators to integrate
their Synapse installation with an existing authentication system.

A password auth provider is a Python class which is dynamically loaded into
Synapse, and provides a number of methods by which it can integrate with the
authentication system.

This document serves as a reference for those looking to implement their own
password auth providers.

Required methods
----------------

Password auth provider classes must provide the following methods:

*class* ``SomeProvider.parse_config``\(*config*)

    This method is passed the ``config`` object for this module from the
    homeserver configuration file.

    It should perform any appropriate sanity checks on the provided
    configuration, and return an object which is then passed into ``__init__``.

*class* ``SomeProvider``\(*config*, *account_handler*)

    The constructor is passed the config object returned by ``parse_config``,
    and a ``synapse.module_api.ModuleApi`` object which allows the
    password provider to check if accounts exist and/or create new ones.

Optional methods
----------------

Password auth provider classes may optionally provide the following methods.

*class* ``SomeProvider.get_db_schema_files``\()

    This method, if implemented, should return an Iterable of ``(name,
    stream)`` pairs of database schema files. Each file is applied in turn at
    initialisation, and a record is then made in the database so that it is
    not re-applied on the next start.

``someprovider.get_supported_login_types``\()

    This method, if implemented, should return a ``dict`` mapping from a login
    type identifier (such as ``m.login.password``) to an iterable giving the
    fields which must be provided by the user in the submission to the
    ``/login`` api. These fields are passed in the ``login_dict`` dictionary
    to ``check_auth``.

    For example, if a password auth provider wants to implement a custom login
    type of ``com.example.custom_login``, where the client is expected to pass
    the fields ``secret1`` and ``secret2``, the provider should implement this
    method and return the following dict::

        {"com.example.custom_login": ("secret1", "secret2")}

``someprovider.check_auth``\(*username*, *login_type*, *login_dict*)

    This method is the one that does the real work. If implemented, it will be
    called for each login attempt where the login type matches one of the keys
    returned by ``get_supported_login_types``.

    It is passed the (possibly UNqualified) ``user`` provided by the client,
    the login type, and a dictionary of login secrets passed by the client.

    The method should return a Twisted ``Deferred`` object, which resolves to
    the canonical ``@localpart:domain`` user id if authentication is successful,
    and ``None`` if not.

    Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in
    which case the second field is a callback which will be called with the
    result from the ``/login`` call (including ``access_token``, ``device_id``,
    etc.)

``someprovider.check_password``\(*user_id*, *password*)

    This method provides a simpler interface than ``get_supported_login_types``
    and ``check_auth`` for password auth providers that just want to provide a
    mechanism for validating ``m.login.password`` logins.

    If implemented, it will be called to check logins with an
    ``m.login.password`` login type. It is passed a qualified
    ``@localpart:domain`` user id, and the password provided by the user.

    The method should return a Twisted ``Deferred`` object, which resolves to
    ``True`` if authentication is successful, and ``False`` if not.

``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*)

    This method, if implemented, is called when a user logs out. It is passed
    the qualified user ID, the ID of the deactivated device (if any: access
    tokens are occasionally created without an associated device ID), and the
    (now deactivated) access token.

    It may return a Twisted ``Deferred`` object; the logout request will wait
    for the deferred to complete but the result is ignored.
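To make the interface concrete, here is a minimal provider skeleton. It is a sketch, not a tested module: the ``shared_password`` config key is invented for the example, and it assumes the account handler exposes a ``check_user_exists`` method:

.. code:: python

    from twisted.internet import defer


    class ExampleAuthProvider(object):
        """Accepts any existing user who presents a single shared password."""

        def __init__(self, config, account_handler):
            self.account_handler = account_handler
            self.password = config["shared_password"]

        @staticmethod
        def parse_config(config):
            # Sanity-check the module's config block before __init__ sees it.
            if "shared_password" not in config:
                raise Exception("shared_password is required")
            return config

        @defer.inlineCallbacks
        def check_password(self, user_id, password):
            # Called for m.login.password logins with a qualified user id.
            if password != self.password:
                defer.returnValue(False)
            exists = yield self.account_handler.check_user_exists(user_id)
            defer.returnValue(bool(exists))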
@@ -1,6 +1,8 @@
 Using Postgres
 --------------
 
+Postgres version 9.4 or later is known to work.
+
 Set up database
 ===============
 
@@ -112,9 +114,9 @@ script one last time, e.g. if the SQLite database is at ``homeserver.db``
 run::
 
     synapse_port_db --sqlite-database homeserver.db \
-        --postgres-config database_config.yaml
+        --postgres-config homeserver-postgres.yaml
 
 Once that has completed, change the synapse config to point at the PostgreSQL
-database configuration file using the ``database_config`` parameter (see
-`Synapse Config`_) and restart synapse. Synapse should now be running against
+database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
+``homeserver.yaml``) and restart synapse. Synapse should now be running against
 PostgreSQL.
@@ -26,28 +26,10 @@ expose the append-only log to the readers should be fairly minimal.
|
|||||||
Architecture
|
Architecture
|
||||||
------------
|
------------
|
||||||
|
|
||||||
The Replication API
|
The Replication Protocol
|
||||||
~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Synapse will optionally expose a long poll HTTP API for extracting updates. The
|
See ``tcp_replication.rst``
|
||||||
API will have a similar shape to /sync in that clients provide tokens
|
|
||||||
indicating where in the log they have reached and a timeout. The synapse server
|
|
||||||
then either responds with updates immediately if it already has updates or it
|
|
||||||
waits until the timeout for more updates. If the timeout expires and nothing
|
|
||||||
happened then the server returns an empty response.
|
|
||||||
|
|
||||||
However unlike the /sync API this replication API is returning synapse specific
|
|
||||||
data rather than trying to implement a matrix specification. The replication
|
|
||||||
results are returned as arrays of rows where the rows are mostly lifted
|
|
||||||
directly from the database. This avoids unnecessary JSON parsing on the server
|
|
||||||
and hopefully avoids an impedance mismatch between the data returned and the
|
|
||||||
required updates to the datastore.
|
|
||||||
|
|
||||||
This does not replicate all the database tables as many of the database tables
|
|
||||||
are indexes that can be recovered from the contents of other tables.
|
|
||||||
|
|
||||||
The format and parameters for the api are documented in
|
|
||||||
``synapse/replication/resource.py``.
|
|
||||||
|
|
||||||
|
|
||||||
The Slaved DataStore
|
The Slaved DataStore
|
||||||
|
@@ -50,7 +50,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'Synapse'
-copyright = u'2014, TNG'
+copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
docs/tcp_replication.rst (new file, 223 lines)
@@ -0,0 +1,223 @@
TCP Replication
===============

Motivation
----------

Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.


Overview
--------

The protocol is based on fire-and-forget, line-based commands. An example flow
would be (where '>' indicates master to worker and '<' worker to master flows)::

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.

Error reporting happens by either the client or server sending an ``ERROR``
command, and usually the connection will be closed.

Since the protocol is a simple line-based one, it's possible to manually connect
to the server using a tool like netcat. A few things should be noted when
manually using the protocol:

* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
  be used to get all future updates. The special stream name ``ALL`` can be used
  with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
  disabled on the main process.
* The server will only time out connections that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial ``PING`` on connection and ensure at least one command every
  5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token, however if the stream
  has multiple rows to replicate per token the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.

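To make the manual-connection notes concrete, here is a hedged sketch of doing
the same thing from Python instead of netcat; the host and port are assumptions
for a local replication listener, and error handling is omitted::

    import socket
    import time

    HOST, PORT = "127.0.0.1", 9092

    sock = socket.create_connection((HOST, PORT))
    f = sock.makefile("rw")

    # An initial PING enables the server-side timeouts; then subscribe to
    # all streams, from now onwards.
    f.write("PING %d\n" % (time.time() * 1000))
    f.write("REPLICATE ALL NOW\n")
    f.flush()

    for line in f:
        # Each line is "<COMMAND> <rest>", e.g. "RDATA events 54 [...]".
        print(line.rstrip("\n"))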
Architecture
------------

The basic structure of the protocol is line-based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the ``RDATA`` command is defined as::

    RDATA <stream_name> <token> <row_json>

(Note that ``<row_json>`` may contain spaces, but cannot contain newlines.)

Blank lines are ignored.


Keep alives
~~~~~~~~~~~

Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.

Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.

This ensures that both sides can quickly realize if the TCP connection has gone
and handle the situation appropriately.


Start up
~~~~~~~~

When a new connection is made, the server:

* Sends a ``SERVER`` command, which includes the identity of the server, allowing
  the client to detect if it's connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
  promptly.

The client:

* Sends a ``NAME`` command, allowing the server to associate a human friendly
  name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to, it sends a ``REPLICATE``
  with the stream_name and token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
  expected server name.


Error handling
~~~~~~~~~~~~~~

If either side detects an error it can send an ``ERROR`` command and close the
connection.

If the client side loses the connection to the server it should reconnect,
following the steps above.


Congestion
~~~~~~~~~~

If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.


Reliability
~~~~~~~~~~~

In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.

The exception to that are the replication streams, i.e. ``RDATA`` commands, since
these include tokens which can be used to restart the stream on connection
errors.

The client should keep track of the token in the last ``RDATA`` command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all ``RDATA`` have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending, these are *not* included on the wire::

    * connection established *
    > SERVER localhost:8823
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
    * connection closed by server *

The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.


An example of a batched set of ``RDATA`` is::

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it sees
the last ``RDATA``.


List of commands
~~~~~~~~~~~~~~~~

The list of valid commands, with which side can send it: server (S) or client (C):

SERVER (S)
    Sent at the start to identify which server the client is talking to

RDATA (S)
    A single update in a stream

POSITION (S)
    The position of the stream has been updated

ERROR (S, C)
    There was an error

PING (S, C)
    Sent periodically to ensure the connection is still alive

NAME (C)
    Sent at the start by client to inform the server who they are

REPLICATE (C)
    Asks the server to replicate a given stream

USER_SYNC (C)
    A user has started or stopped syncing

FEDERATION_ACK (C)
    Acknowledge receipt of some federation data

REMOVE_PUSHER (C)
    Inform the server a pusher should be removed

INVALIDATE_CACHE (C)
    Inform the server a cache should be invalidated

SYNC (S, C)
    Used exclusively in tests


See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
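To make the batching rule above concrete, a hedged sketch of the client-side
bookkeeping (buffer rows while the token is ``batch``, and only advance the
stream position on the final, numeric token) could look like::

    import json

    class StreamPositions(object):
        def __init__(self):
            self.tokens = {}   # stream name -> last completed token
            self.pending = {}  # stream name -> rows awaiting their token

        def on_rdata_line(self, line):
            # line looks like: RDATA <stream_name> <token> <row_json>
            cmd, stream, token, row_json = line.split(" ", 3)
            assert cmd == "RDATA"
            self.pending.setdefault(stream, []).append(json.loads(row_json))
            if token == "batch":
                return None
            # Batch complete: advance the stream position and hand rows over.
            self.tokens[stream] = int(token)
            rows, self.pending[stream] = self.pending[stream], []
            return rows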
@@ -50,14 +50,37 @@ You may be able to setup coturn via your package manager, or set it up manually
 
     pwgen -s 64 1
 
-5. Ensure youe firewall allows traffic into the TURN server on
-   the ports you've configured it to listen on (remember to allow
-   both TCP and UDP if you've enabled both).
+5. Consider your security settings. TURN lets users request a relay
+   which will connect to arbitrary IP addresses and ports. At the least
+   we recommend:
 
-6. If you've configured coturn to support TLS/DTLS, generate or
+       # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
+       no-tcp-relay
+
+       # don't let the relay ever try to connect to private IP address ranges within your network (if any)
+       # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
+       denied-peer-ip=10.0.0.0-10.255.255.255
+       denied-peer-ip=192.168.0.0-192.168.255.255
+       denied-peer-ip=172.16.0.0-172.31.255.255
+
+       # special case the turn server itself so that client->TURN->TURN->client flows work
+       allowed-peer-ip=10.0.0.1
+
+       # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
+       user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
+       total-quota=1200
+
+   Ideally coturn should refuse to relay traffic which isn't SRTP;
+   see https://github.com/matrix-org/synapse/issues/2009
+
+6. Ensure your firewall allows traffic into the TURN server on
+   the ports you've configured it to listen on (remember to allow
+   both TCP and UDP TURN traffic)
+
+7. If you've configured coturn to support TLS/DTLS, generate or
    import your private key and certificate.
 
-7. Start the turn server::
+8. Start the turn server::
 
     bin/turnserver -o
 
@@ -83,12 +106,19 @@ Your home server configuration file needs the following extra keys:
    to refresh credentials. The TURN REST API specification recommends
    one day (86400000).
 
+4. "turn_allow_guests": Whether to allow guest users to use the TURN
+   server. This is enabled by default, as otherwise VoIP will not
+   work reliably for guests. However, it does introduce a security risk
+   as it lets guests connect to arbitrary endpoints without having gone
+   through a CAPTCHA or similar to register a real account.
+
 As an example, here is the relevant section of the config file for
 matrix.org::
 
     turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
     turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
    turn_user_lifetime: 86400000
+    turn_allow_guests: True
 
 Now, restart synapse::
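Assembled into one place, the recommended fragment of ``turnserver.conf`` would
look something like the following (the auth-secret lines are assumed from the
earlier setup steps, and the relay IP is a placeholder for your server)::

    use-auth-secret
    static-auth-secret=<your shared secret>
    no-tcp-relay
    denied-peer-ip=10.0.0.0-10.255.255.255
    denied-peer-ip=192.168.0.0-192.168.255.255
    denied-peer-ip=172.16.0.0-172.31.255.255
    allowed-peer-ip=10.0.0.1
    user-quota=12
    total-quota=1200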
@@ -56,6 +56,7 @@ As a first cut, let's do #2 and have the receiver hit the API to calculate its o
 API
 ---
 
+```
 GET /_matrix/media/r0/preview_url?url=http://wherever.com
 200 OK
 {
@@ -66,6 +67,7 @@ GET /_matrix/media/r0/preview_url?url=http://wherever.com
 "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
 "og:site_name" : "Twitter"
 }
+```
 
 * Downloads the URL
 * If HTML, just stores it in RAM and parses it for OG meta tags
docs/user_directory.md (new file, 17 lines)
@@ -0,0 +1,17 @@
User Directory API Implementation
=================================

The user directory is currently maintained based on the 'visible' users
on this particular server - i.e. ones which your account shares a room with, or
who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
quickest solution to fix it is:

```
UPDATE user_directory_stream_pos SET stream_id = NULL;
```

and restart the synapse, which should then start a background task to
flush the current tables and regenerate the directory.
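If you are on PostgreSQL, the same reset can be applied from the shell; for
example (the database name and the use of synctl are assumptions about your
deployment):

```
psql synapse -c "UPDATE user_directory_stream_pos SET stream_id = NULL;"
synctl restart
```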
docs/workers.rst (203 lines changed)
@@ -1,63 +1,90 @@
 Scaling synapse via workers
----------------------------
+===========================
 
 Synapse has experimental support for splitting out functionality into
 multiple separate python processes, helping greatly with scalability. These
 processes are called 'workers', and are (eventually) intended to scale
 horizontally independently.
 
+All of the below is highly experimental and subject to change as Synapse evolves,
+but documenting it here to help folks needing highly scalable Synapses similar
+to the one running matrix.org!
+
 All processes continue to share the same database instance, and as such, workers
 only work with postgres based synapse deployments (sharing a single sqlite
 across multiple processes is a recipe for disaster, plus you should be using
 postgres anyway if you care about scalability).
 
 The workers communicate with the master synapse process via a synapse-specific
-HTTP protocol called 'replication' - analogous to MySQL or Postgres style
+TCP protocol called 'replication' - analogous to MySQL or Postgres style
 database replication; feeding a stream of relevant data to the workers so they
 can be kept in sync with the main synapse process and database state.
 
-To enable workers, you need to add a replication listener to the master synapse, e.g.::
+Configuration
+-------------
+
+To make effective use of the workers, you will need to configure an HTTP
+reverse-proxy such as nginx or haproxy, which will direct incoming requests to
+the correct worker, or to the main synapse instance. Note that this includes
+requests made to the federation port. The caveats regarding running a
+reverse-proxy on the federation port still apply (see
+https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).
+
+To enable workers, you need to add two replication listeners to the master
+synapse, e.g.::
 
   listeners:
+    # The TCP replication port
     - port: 9092
+      bind_address: '127.0.0.1'
+      type: replication
+    # The HTTP replication port
+    - port: 9093
       bind_address: '127.0.0.1'
       type: http
-      tls: false
-      x_forwarded: false
       resources:
         - names: [replication]
-          compress: false
 
-Under **no circumstances** should this replication API listener be exposed to the
-public internet; it currently implements no authentication whatsoever and is
-unencrypted HTTP.
+Under **no circumstances** should these replication API listeners be exposed to
+the public internet; it currently implements no authentication whatsoever and is
+unencrypted.
 
-You then create a set of configs for the various worker processes. These should be
-worker configuration files should be stored in a dedicated subdirectory, to allow
-synctl to manipulate them.
+(Roughly, the TCP port is used for streaming data from the master to the
+workers, and the HTTP port for the workers to send data to the main
+synapse process.)
 
-The current available worker applications are:
- * synapse.app.pusher - handles sending push notifications to sygnal and email
- * synapse.app.synchrotron - handles /sync endpoints. can scales horizontally through multiple instances.
- * synapse.app.appservice - handles output traffic to Application Services
- * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
- * synapse.app.media_repository - handles the media repository.
- * synapse.app.client_reader - handles client API endpoints like /publicRooms
+You then create a set of configs for the various worker processes. These
+should be worker configuration files, and should be stored in a dedicated
+subdirectory, to allow synctl to manipulate them. An additional configuration
+for the master synapse process will need to be created because the process will
+not be started automatically. That configuration should look like this::
+
+    worker_app: synapse.app.homeserver
+    daemonize: true
 
 Each worker configuration file inherits the configuration of the main homeserver
 configuration file. You can then override configuration specific to that worker,
 e.g. the HTTP listener that it provides (if any); logging configuration; etc.
 You should minimise the number of overrides though to maintain a usable config.
 
-You must specify the type of worker application (worker_app) and the replication
-endpoint that it's talking to on the main synapse process (worker_replication_url).
+You must specify the type of worker application (``worker_app``). The currently
+available worker applications are listed below. You must also specify the
+replication endpoints that it's talking to on the main synapse process.
+``worker_replication_host`` should specify the host of the main synapse,
+``worker_replication_port`` should point to the TCP replication listener port and
+``worker_replication_http_port`` should point to the HTTP replication port.
+
+Currently, only the ``event_creator`` worker requires specifying
+``worker_replication_http_port``.
 
 For instance::
 
     worker_app: synapse.app.synchrotron
 
     # The replication listener on the synapse to talk to.
-    worker_replication_url: http://127.0.0.1:9092/_synapse/replication
+    worker_replication_host: 127.0.0.1
+    worker_replication_port: 9092
+    worker_replication_http_port: 9093
 
     worker_listeners:
      - type: http
@@ -71,11 +98,11 @@ For instance::
     worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
 
 ...is a full configuration for a synchrotron worker instance, which will expose a
-plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
+plain HTTP ``/sync`` endpoint on port 8083 separately from the ``/sync`` endpoint provided
 by the main synapse.
 
-Obviously you should configure your loadbalancer to route the /sync endpoint to
-the synchrotron instance(s) in this instance.
+Obviously you should configure your reverse-proxy to route the relevant
+endpoints to the worker (``localhost:8083`` in the above example).
 
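For example, a hedged sketch of the corresponding nginx configuration (the
regex and ports simply mirror the example above; adapt them to your deployment)
might be::

    location ~ ^/_matrix/client/(v2_alpha|r0)/sync$ {
        proxy_pass http://localhost:8083;
    }

    location /_matrix {
        proxy_pass http://localhost:8008;
    }

Requests matching the worker's endpoints go to the worker; everything else
falls through to the main synapse process.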
 Finally, to actually run your worker-based synapse, you must pass synctl the -a
 commandline option to tell it to operate on all the worker configurations found
@@ -92,7 +119,127 @@ To manipulate a specific worker, you pass the -w option to synctl::
 
   synctl -w $CONFIG/workers/synchrotron.yaml restart
 
-All of the above is highly experimental and subject to change as Synapse evolves,
-but documenting it here to help folks needing highly scalable Synapses similar
-to the one running matrix.org!
+
+Available worker applications
+-----------------------------
+
+``synapse.app.pusher``
+~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending push notifications to sygnal and email. Doesn't handle any
+REST endpoints itself, but you should set ``start_pushers: False`` in the
+shared configuration file to stop the main synapse sending these notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+
+``synapse.app.synchrotron``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The synchrotron handles ``sync`` requests from clients. In particular, it can
+handle REST endpoints matching the following regular expressions::
+
+    ^/_matrix/client/(v2_alpha|r0)/sync$
+    ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
+    ^/_matrix/client/(api/v1|r0)/initialSync$
+    ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$
+
+The above endpoints should all be routed to the synchrotron worker by the
+reverse-proxy configuration.
+
+It is possible to run multiple instances of the synchrotron to scale
+horizontally. In this case the reverse-proxy should be configured to
+load-balance across the instances, though it will be more efficient if all
+requests from a particular user are routed to a single instance. Extracting
+a userid from the access token is currently left as an exercise for the reader.
+
+
+``synapse.app.appservice``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending output traffic to Application Services. Doesn't handle any
+REST endpoints itself, but you should set ``notify_appservices: False`` in the
+shared configuration file to stop the main synapse sending these notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+
+``synapse.app.federation_reader``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles a subset of federation endpoints. In particular, it can handle REST
+endpoints matching the following regular expressions::
+
+    ^/_matrix/federation/v1/event/
+    ^/_matrix/federation/v1/state/
+    ^/_matrix/federation/v1/state_ids/
+    ^/_matrix/federation/v1/backfill/
+    ^/_matrix/federation/v1/get_missing_events/
+    ^/_matrix/federation/v1/publicRooms
+
+The above endpoints should all be routed to the federation_reader worker by the
+reverse-proxy configuration.
+
+
+``synapse.app.federation_sender``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending federation traffic to other servers. Doesn't handle any
+REST endpoints itself, but you should set ``send_federation: False`` in the
+shared configuration file to stop the main synapse sending this traffic.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+
+``synapse.app.media_repository``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles the media repository. It can handle all endpoints starting with::
+
+    /_matrix/media/
+
+You should also set ``enable_media_repo: False`` in the shared configuration
+file to stop the main synapse running background jobs related to managing the
+media repository.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+
+``synapse.app.client_reader``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles client API endpoints. It can handle REST endpoints matching the
+following regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
+
+
+``synapse.app.user_dir``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles searches in the user directory. It can handle REST endpoints matching
+the following regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
+
+
+``synapse.app.frontend_proxy``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Proxies some frequently-requested client endpoints to add caching and remove
+load from the main synapse. It can handle REST endpoints matching the following
+regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload
+
+It will proxy any requests it cannot handle to the main synapse instance. It
+must therefore be configured with the location of the main instance, via
+the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
+file. For example::
+
+    worker_main_http_uri: http://127.0.0.1:8008
+
+
+``synapse.app.event_creator``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles some event creation. It can handle REST endpoints matching::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
+    ^/_matrix/client/(api/v1|r0|unstable)/join/
+
+It will create events locally and then send them on to the main synapse
+instance to be persisted and handled.
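Tying the configuration section together, a hedged sketch of a config file for
one of the workers above (the ``event_creator``; ports, paths and the listener
resource name are assumptions) might be::

    worker_app: synapse.app.event_creator

    # The replication listeners on the main synapse to talk to.
    worker_replication_host: 127.0.0.1
    worker_replication_port: 9092
    worker_replication_http_port: 9093

    worker_listeners:
      - type: http
        port: 8084
        resources:
          - names: [client]

    worker_log_config: /home/matrix/synapse/config/event_creator_log_config.yaml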
jenkins-dendron-haproxy-postgres.sh (new executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --haproxy \

@@ -15,10 +15,6 @@ export SYNAPSE_CACHE_FACTOR=1
 ./sytest/jenkins/prep_sytest_for_postgres.sh
 
 ./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
-    --pusher \
-    --synchrotron \
-    --federation-reader \
-    --client-reader \
-    --appservice \
@@ -14,4 +14,5 @@ export SYNAPSE_CACHE_FACTOR=1
 ./sytest/jenkins/prep_sytest_for_postgres.sh
 
 ./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \

@@ -12,4 +12,5 @@ export SYNAPSE_CACHE_FACTOR=1
 ./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
 
 ./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \
@@ -1,5 +1,7 @@
 #! /bin/bash
 
+set -eux
+
 cd "`dirname $0`/.."
 
 TOX_DIR=$WORKSPACE/.tox
@@ -14,7 +16,20 @@ fi
 tox -e py27 --notest -v
 
 TOX_BIN=$TOX_DIR/py27/bin
-$TOX_BIN/pip install setuptools
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install lxml
-$TOX_BIN/pip install psycopg2
+
+# cryptography 2.2 requires setuptools >= 18.5.
+#
+# older versions of virtualenv (?) give us a virtualenv with the same version
+# of setuptools as is installed on the system python (and tox runs virtualenv
+# under python3, so we get the version of setuptools that is installed on that).
+#
+# anyway, make sure that we have a recent enough setuptools.
+$TOX_BIN/pip install 'setuptools>=18.5'
+
+# we also need a semi-recent version of pip, because old ones fail to install
+# the "enum34" dependency of cryptography.
+$TOX_BIN/pip install 'pip>=10'
+
+{ python synapse/python_dependencies.py
+  echo lxml psycopg2
+} | xargs $TOX_BIN/pip install
scripts-dev/federation_client.py (125 lines changed, Normal file → Executable file)
@@ -1,10 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
 import nacl.signing
 import json
 import base64
 import requests
 import sys
 import srvlookup
+import yaml
 
 def encode_base64(input_bytes):
     """Encode bytes as a base64 string without any padding."""
@@ -103,15 +123,25 @@ def lookup(destination, path):
     except:
         return "https://%s:%d%s" % (destination, 8448, path)
 
-def get_json(origin_name, origin_key, destination, path):
-    request_json = {
-        "method": "GET",
+
+def request_json(method, origin_name, origin_key, destination, path, content):
+    if method is None:
+        if content is None:
+            method = "GET"
+        else:
+            method = "POST"
+
+    json_to_sign = {
+        "method": method,
         "uri": path,
         "origin": origin_name,
         "destination": destination,
     }
 
-    signed_json = sign_json(request_json, origin_key, origin_name)
+    if content is not None:
+        json_to_sign["content"] = json.loads(content)
+
+    signed_json = sign_json(json_to_sign, origin_key, origin_name)
 
     authorization_headers = []
 
@@ -120,30 +150,97 @@ def request_json(method, origin_name, origin_key, destination, path, content):
             origin_name, key, sig,
         )
         authorization_headers.append(bytes(header))
-        sys.stderr.write(header)
-        sys.stderr.write("\n")
+        print ("Authorization: %s" % header, file=sys.stderr)
+
+    dest = lookup(destination, path)
+    print ("Requesting %s" % dest, file=sys.stderr)
 
-    result = requests.get(
-        lookup(destination, path),
+    result = requests.request(
+        method=method,
+        url=dest,
         headers={"Authorization": authorization_headers[0]},
         verify=False,
+        data=content,
     )
     sys.stderr.write("Status Code: %d\n" % (result.status_code,))
     return result.json()
 
 
 def main():
-    origin_name, keyfile, destination, path = sys.argv[1:]
+    parser = argparse.ArgumentParser(
+        description=
+            "Signs and sends a federation request to a matrix homeserver",
+    )
 
-    with open(keyfile) as f:
+    parser.add_argument(
+        "-N", "--server-name",
+        help="Name to give as the local homeserver. If unspecified, will be "
+             "read from the config file.",
+    )
+
+    parser.add_argument(
+        "-k", "--signing-key-path",
+        help="Path to the file containing the private ed25519 key to sign the "
+             "request with.",
+    )
+
+    parser.add_argument(
+        "-c", "--config",
+        default="homeserver.yaml",
+        help="Path to server config file. Ignored if --server-name and "
+             "--signing-key-path are both given.",
+    )
+
+    parser.add_argument(
+        "-d", "--destination",
+        default="matrix.org",
+        help="name of the remote homeserver. We will do SRV lookups and "
+             "connect appropriately.",
+    )
+
+    parser.add_argument(
+        "-X", "--method",
+        help="HTTP method to use for the request. Defaults to GET if --data is"
+             "unspecified, POST if it is."
+    )
+
+    parser.add_argument(
+        "--body",
+        help="Data to send as the body of the HTTP request"
+    )
+
+    parser.add_argument(
+        "path",
+        help="request path. We will add '/_matrix/federation/v1/' to this."
+    )
+
+    args = parser.parse_args()
+
+    if not args.server_name or not args.signing_key_path:
+        read_args_from_config(args)
+
+    with open(args.signing_key_path) as f:
         key = read_signing_keys(f)[0]
 
-    result = get_json(
-        origin_name, key, destination, "/_matrix/federation/v1/" + path
+    result = request_json(
+        args.method,
+        args.server_name, key, args.destination,
+        "/_matrix/federation/v1/" + args.path,
+        content=args.body,
    )
 
     json.dump(result, sys.stdout)
-    print ""
+    print ("")
+
+
+def read_args_from_config(args):
+    with open(args.config, 'r') as fh:
+        config = yaml.safe_load(fh)
+        if not args.server_name:
+            args.server_name = config['server_name']
+        if not args.signing_key_path:
+            args.signing_key_path = config['signing_key_path']
 
 
 if __name__ == "__main__":
     main()
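With the argparse interface above, a typical invocation of the script (the
destination server and event ID are placeholders) looks like::

    ./federation_client.py -c homeserver.yaml -d remote.example.com \
        event/$SOME_EVENT_ID

The server name and signing key are read from ``homeserver.yaml`` unless
``-N`` and ``-k`` are given explicitly.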
@@ -6,19 +6,52 @@
 
 ## Do not run it lightly.
 
+set -e
+
+if [ "$1" == "-h" ] || [ "$1" == "" ]; then
+  echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
+  echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
+  echo "or"
+  echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
+  exit
+fi
+
 ROOMID="$1"
 
-sqlite3 homeserver.db <<EOF
-DELETE FROM context_depth WHERE context = '$ROOMID';
-DELETE FROM current_state WHERE context = '$ROOMID';
-DELETE FROM feedback WHERE room_id = '$ROOMID';
-DELETE FROM messages WHERE room_id = '$ROOMID';
-DELETE FROM pdu_backward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdu_edges WHERE context = '$ROOMID';
-DELETE FROM pdu_forward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdus WHERE context = '$ROOMID';
-DELETE FROM room_data WHERE room_id = '$ROOMID';
+cat <<EOF
+DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_edges WHERE room_id = '$ROOMID';
+DELETE FROM room_depth WHERE room_id = '$ROOMID';
+DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM events WHERE room_id = '$ROOMID';
+DELETE FROM event_json WHERE room_id = '$ROOMID';
+DELETE FROM state_events WHERE room_id = '$ROOMID';
+DELETE FROM current_state_events WHERE room_id = '$ROOMID';
 DELETE FROM room_memberships WHERE room_id = '$ROOMID';
+DELETE FROM feedback WHERE room_id = '$ROOMID';
+DELETE FROM topics WHERE room_id = '$ROOMID';
+DELETE FROM room_names WHERE room_id = '$ROOMID';
 DELETE FROM rooms WHERE room_id = '$ROOMID';
-DELETE FROM state_pdus WHERE context = '$ROOMID';
+DELETE FROM room_hosts WHERE room_id = '$ROOMID';
+DELETE FROM room_aliases WHERE room_id = '$ROOMID';
+DELETE FROM state_groups WHERE room_id = '$ROOMID';
+DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
+DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
+DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
+DELETE FROM event_search WHERE room_id = '$ROOMID';
+DELETE FROM guest_access WHERE room_id = '$ROOMID';
+DELETE FROM history_visibility WHERE room_id = '$ROOMID';
+DELETE FROM room_tags WHERE room_id = '$ROOMID';
+DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
+DELETE FROM room_account_data WHERE room_id = '$ROOMID';
+DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
+DELETE FROM local_invites WHERE room_id = '$ROOMID';
+DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
+DELETE FROM event_reports WHERE room_id = '$ROOMID';
+DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
+DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
+DELETE FROM event_auth WHERE room_id = '$ROOMID';
+DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
+VACUUM;
 EOF
scripts/move_remote_media_to_new_store.py (new executable file, 133 lines)
@@ -0,0 +1,133 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Moves a list of remote media from one media store to another.

The input should be a list of media files to be moved, one per line. Each line
should be formatted::

    <origin server>|<file id>

This can be extracted from postgres with::

    psql --tuples-only -A -c "select media_origin, filesystem_id from
        matrix.remote_media_cache where ..."

To use, pipe the above into::

    PYTHONPATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""

from __future__ import print_function

import argparse
import logging
import os
import shutil
import sys

from synapse.rest.media.v1.filepath import MediaFilePaths

logger = logging.getLogger()


def main(src_repo, dest_repo):
    src_paths = MediaFilePaths(src_repo)
    dest_paths = MediaFilePaths(dest_repo)
    for line in sys.stdin:
        line = line.strip()
        parts = line.split('|')
        if len(parts) != 2:
            print("Unable to parse input line %s" % line, file=sys.stderr)
            exit(1)

        move_media(parts[0], parts[1], src_paths, dest_paths)


def move_media(origin_server, file_id, src_paths, dest_paths):
    """Move the given file, and any thumbnails, to the dest repo

    Args:
        origin_server (str):
        file_id (str):
        src_paths (MediaFilePaths):
        dest_paths (MediaFilePaths):
    """
    logger.info("%s/%s", origin_server, file_id)

    # check that the original exists
    original_file = src_paths.remote_media_filepath(origin_server, file_id)
    if not os.path.exists(original_file):
        logger.warn(
            "Original for %s/%s (%s) does not exist",
            origin_server, file_id, original_file,
        )
    else:
        mkdir_and_move(
            original_file,
            dest_paths.remote_media_filepath(origin_server, file_id),
        )

    # now look for thumbnails
    original_thumb_dir = src_paths.remote_media_thumbnail_dir(
        origin_server, file_id,
    )
    if not os.path.exists(original_thumb_dir):
        return

    mkdir_and_move(
        original_thumb_dir,
        dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
    )


def mkdir_and_move(original_file, dest_file):
    dirname = os.path.dirname(dest_file)
    if not os.path.exists(dirname):
        logger.debug("mkdir %s", dirname)
        os.makedirs(dirname)
    logger.debug("mv %s %s", original_file, dest_file)
    shutil.move(original_file, dest_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-v", action='store_true', help='enable debug logging')
    parser.add_argument(
        "src_repo",
        help="Path to source content repo",
    )
    parser.add_argument(
        "dest_repo",
        help="Path to destination content repo",
    )
    args = parser.parse_args()

    logging_config = {
        "level": logging.DEBUG if args.v else logging.INFO,
        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    }
    logging.basicConfig(**logging_config)

    main(args.src_repo, args.dest_repo)
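An end-to-end sketch of driving the script (the database name, table
qualification and store paths are placeholders for your deployment)::

    psql --tuples-only -A synapse -c \
        "select media_origin, filesystem_id from remote_media_cache" \
      | PYTHONPATH=. ./scripts/move_remote_media_to_new_store.py \
            /var/lib/synapse/media_store /mnt/new_media_store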
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,6 +30,8 @@ import time
 import traceback
 import yaml
 
+from six import string_types
+
 
 logger = logging.getLogger("synapse_port_db")
 
@@ -40,6 +43,16 @@ BOOLEAN_COLUMNS = {
     "presence_list": ["accepted"],
     "presence_stream": ["currently_active"],
     "public_room_list_stream": ["visibility"],
+    "device_lists_outbound_pokes": ["sent"],
+    "users_who_share_rooms": ["share_private"],
+    "groups": ["is_public"],
+    "group_rooms": ["is_public"],
+    "group_users": ["is_public", "is_admin"],
+    "group_summary_rooms": ["is_public"],
+    "group_room_categories": ["is_public"],
+    "group_summary_users": ["is_public"],
+    "group_roles": ["is_public"],
+    "local_group_membership": ["is_publicised", "is_admin"],
 }
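For context on the ``BOOLEAN_COLUMNS`` additions above: SQLite stores booleans
as ``0``/``1`` integers, while PostgreSQL has a real boolean type, so the porter
has to coerce the listed columns row by row. A hedged sketch of that conversion
(simplified relative to what the porter's row-conversion actually does) is::

    BOOLEAN_COLUMNS = {"group_users": ["is_public", "is_admin"]}

    def convert_row(table, headers, row):
        # Replace SQLite's 0/1 integers with real booleans for PostgreSQL.
        bool_cols = set(BOOLEAN_COLUMNS.get(table, []))
        return tuple(
            bool(value) if header in bool_cols else value
            for header, value in zip(headers, row)
        )

    # convert_row("group_users", ["group_id", "is_public"], ("g1", 1))
    #   -> ("g1", True)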
@@ -110,6 +123,7 @@ class Store(object):
 
     _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
     _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
+    _simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]
 
     def runInteraction(self, desc, func, *args, **kwargs):
         def r(conn):
@@ -120,7 +134,7 @@ class Store(object):
             try:
                 txn = conn.cursor()
                 return func(
-                    LoggingTransaction(txn, desc, self.database_engine, []),
+                    LoggingTransaction(txn, desc, self.database_engine, [], []),
                     *args, **kwargs
                 )
             except self.database_engine.module.DatabaseError as e:
@@ -239,6 +253,12 @@ class Porter(object):
     @defer.inlineCallbacks
     def handle_table(self, table, postgres_size, table_size, forward_chunk,
                      backward_chunk):
+        logger.info(
+            "Table %s: %i/%i (rows %i-%i) already ported",
+            table, postgres_size, table_size,
+            backward_chunk+1, forward_chunk-1,
+        )
+
         if not table_size:
             return
 
@@ -250,6 +270,25 @@ class Porter(object):
             )
             return
 
+        if table in (
+            "user_directory", "user_directory_search", "users_who_share_rooms",
+            "users_in_pubic_room",
+        ):
+            # We don't port these tables, as they're a faff and we can regenerate
+            # them anyway.
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
+        if table == "user_directory_stream_pos":
+            # We need to make sure there is a single row, `(X, null)`, as that is
+            # what synapse expects to be there.
+            yield self.postgres_store._simple_insert(
+                table=table,
+                values={"stream_id": None},
+            )
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
         forward_select = (
             "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
             % (table,)
@@ -297,7 +336,7 @@ class Porter(object):
             backward_chunk = min(row[0] for row in brows) - 1
 
         rows = frows + brows
-        self._convert_rows(table, headers, rows)
+        rows = self._convert_rows(table, headers, rows)
 
         def insert(txn):
             self.postgres_store.insert_many_txn(
@@ -355,10 +394,13 @@ class Porter(object):
                 " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
             )
 
-            rows_dict = [
-                dict(zip(headers, row))
-                for row in rows
-            ]
+            rows_dict = []
+            for row in rows:
+                d = dict(zip(headers, row))
+                if "\0" in d['value']:
+                    logger.warn('dropping search row %s', d)
+                else:
+                    rows_dict.append(d)
 
             txn.executemany(sql, [
                 (
@@ -434,33 +476,10 @@ class Porter(object):
         self.progress.set_state("Preparing PostgreSQL")
         self.setup_db(postgres_config, postgres_engine)
 
-        # Step 2. Get tables.
-        self.progress.set_state("Fetching tables")
-        sqlite_tables = yield self.sqlite_store._simple_select_onecol(
-            table="sqlite_master",
-            keyvalues={
-                "type": "table",
-            },
-            retcol="name",
-        )
-
-        postgres_tables = yield self.postgres_store._simple_select_onecol(
-            table="information_schema.tables",
-            keyvalues={
-                "table_schema": "public",
-            },
-            retcol="distinct table_name",
-        )
-
-        tables = set(sqlite_tables) & set(postgres_tables)
-
-        self.progress.set_state("Creating tables")
-
-        logger.info("Found %d tables", len(tables))
-
+        self.progress.set_state("Creating port tables")
         def create_port_table(txn):
             txn.execute(
-                "CREATE TABLE port_from_sqlite3 ("
+                "CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
                 " table_name varchar(100) NOT NULL UNIQUE,"
                 " forward_rowid bigint NOT NULL,"
                 " backward_rowid bigint NOT NULL"
@@ -486,18 +505,33 @@ class Porter(object):
                 "alter_table", alter_table
             )
         except Exception as e:
-            logger.info("Failed to create port table: %s", e)
+            pass
 
-        try:
-            yield self.postgres_store.runInteraction(
-                "create_port_table", create_port_table
-            )
-        except Exception as e:
-            logger.info("Failed to create port table: %s", e)
+        yield self.postgres_store.runInteraction(
+            "create_port_table", create_port_table
+        )
 
-        self.progress.set_state("Setting up")
+        # Step 2. Get tables.
+        self.progress.set_state("Fetching tables")
+        sqlite_tables = yield self.sqlite_store._simple_select_onecol(
+            table="sqlite_master",
+            keyvalues={
+                "type": "table",
+            },
+            retcol="name",
+        )
 
-        # Set up tables.
+        postgres_tables = yield self.postgres_store._simple_select_onecol(
+            table="information_schema.tables",
+            keyvalues={},
+            retcol="distinct table_name",
+        )
+
+        tables = set(sqlite_tables) & set(postgres_tables)
+        logger.info("Found %d tables", len(tables))
+
+        # Step 3. Figure out what still needs copying
+        self.progress.set_state("Checking on port progress")
         setup_res = yield defer.gatherResults(
             [
                 self.setup_table(table)
@@ -508,7 +542,8 @@ class Porter(object):
             consumeErrors=True,
         )
 
-        # Process tables.
+        # Step 4. Do the copying.
+        self.progress.set_state("Copying to postgres")
         yield defer.gatherResults(
             [
                 self.handle_table(*res)
|
||||||
@@ -517,6 +552,9 @@ class Porter(object):
|
|||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Step 5. Do final post-processing
|
||||||
|
yield self._setup_state_group_id_seq()
|
||||||
|
|
||||||
self.progress.done()
|
self.progress.done()
|
||||||
except:
|
except:
|
||||||
global end_error_exec_info
|
global end_error_exec_info
|
||||||
@@ -532,17 +570,29 @@ class Porter(object):
|
|||||||
i for i, h in enumerate(headers) if h in bool_col_names
|
i for i, h in enumerate(headers) if h in bool_col_names
|
||||||
]
|
]
|
||||||
|
|
||||||
|
class BadValueException(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
def conv(j, col):
|
def conv(j, col):
|
||||||
if j in bool_cols:
|
if j in bool_cols:
|
||||||
return bool(col)
|
return bool(col)
|
||||||
|
elif isinstance(col, string_types) and "\0" in col:
|
||||||
|
logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
|
||||||
|
raise BadValueException();
|
||||||
return col
|
return col
|
||||||
|
|
||||||
|
outrows = []
|
||||||
for i, row in enumerate(rows):
|
for i, row in enumerate(rows):
|
||||||
rows[i] = tuple(
|
try:
|
||||||
|
outrows.append(tuple(
|
||||||
conv(j, col)
|
conv(j, col)
|
||||||
for j, col in enumerate(row)
|
for j, col in enumerate(row)
|
||||||
if j > 0
|
if j > 0
|
||||||
)
|
))
|
||||||
|
except BadValueException:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return outrows
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _setup_sent_transactions(self):
|
def _setup_sent_transactions(self):
|
||||||
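An aside on the _convert_rows change shown above: the method now returns a filtered copy of the rows, coercing SQLite's integer booleans and dropping any row whose string values contain a NUL byte, which PostgreSQL rejects in text columns. A minimal, standalone sketch of the same pattern follows; the names are illustrative, not the port script's actual API:

    import logging

    logger = logging.getLogger(__name__)


    class BadValueException(Exception):
        pass


    def convert_rows(headers, rows, bool_cols=frozenset()):
        """Coerce boolean columns and drop any row containing a NUL byte."""
        def conv(j, col):
            if j in bool_cols:
                return bool(col)
            if isinstance(col, str) and "\0" in col:
                logger.warning(
                    "DROPPING ROW: NUL value in col %s: %r", headers[j], col
                )
                raise BadValueException()
            return col

        outrows = []
        for row in rows:
            try:
                outrows.append(tuple(conv(j, col) for j, col in enumerate(row)))
            except BadValueException:
                pass  # skip the whole row, as the diff above does
        return outrows

With this sketch, convert_rows(["id", "value"], [(1, "ok"), (2, "bad\0")]) returns [(1, "ok")]: the second row is dropped rather than sent to Postgres.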
@@ -570,7 +620,7 @@ class Porter(object):
             "select", r,
         )

-        self._convert_rows("sent_transactions", headers, rows)
+        rows = self._convert_rows("sent_transactions", headers, rows)

         inserted_rows = len(rows)
         if inserted_rows:
@@ -664,6 +714,16 @@ class Porter(object):

         defer.returnValue((done, remaining + done))

+    def _setup_state_group_id_seq(self):
+        def r(txn):
+            txn.execute("SELECT MAX(id) FROM state_groups")
+            next_id = txn.fetchone()[0]+1
+            txn.execute(
+                "ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
+                (next_id,),
+            )
+        return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
+
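A note on Step 5 above: _setup_state_group_id_seq restarts PostgreSQL's state_group_id_seq at MAX(id) + 1, so that the first state group created after the port receives an id above everything copied from SQLite. For example, if MAX(id) comes back as 4000, the sequence restarts at 4001.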

 ##############################################
 ###### The following is simply UI stuff ######
scripts/sync_room_to_group.pl (new executable file, 45 lines)
@@ -0,0 +1,45 @@
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+
+use JSON::XS;
+use LWP::UserAgent;
+use URI::Escape;
+
+if (@ARGV < 4) {
+    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
+}
+
+my ($hs, $access_token, $room_id, $group_id) = @ARGV;
+my $ua = LWP::UserAgent->new();
+$ua->timeout(10);
+
+if ($room_id =~ /^#/) {
+    $room_id = uri_escape($room_id);
+    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
+}
+
+my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
+my $group_users = [
+    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
+    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
+];
+
+die "refusing to sync from empty room" unless (@$room_users);
+die "refusing to sync to empty group" unless (@$group_users);
+
+my $diff = {};
+foreach my $user (@$room_users) { $diff->{$user}++ }
+foreach my $user (@$group_users) { $diff->{$user}-- }
+
+foreach my $user (keys %$diff) {
+    if ($diff->{$user} == 1) {
+        warn "inviting $user";
+        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
+    }
+    elsif ($diff->{$user} == -1) {
+        warn "removing $user";
+        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
+    }
+}
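A hypothetical invocation, with placeholder values: ./scripts/sync_room_to_group.pl https://matrix.example.org ACCESS_TOKEN '#myroom:example.org' '+mygroup:example.org'. The script builds a tally (+1 for each room member, -1 for each current or invited group member), then invites users only in the room and removes users only in the group, so the group's membership converges on the room's.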
setup.py (73 lines changed)
@@ -23,6 +23,45 @@ import sys
 here = os.path.abspath(os.path.dirname(__file__))


+# Some notes on `setup.py test`:
+#
+# Once upon a time we used to try to make `setup.py test` run `tox` to run the
+# tests. That's a bad idea for three reasons:
+#
+# 1: `setup.py test` is supposed to find out whether the tests work in the
+#    *current* environmentt, not whatever tox sets up.
+# 2: Empirically, trying to install tox during the test run wasn't working ("No
+#    module named virtualenv").
+# 3: The tox documentation advises against it[1].
+#
+# Even further back in time, we used to use setuptools_trial [2]. That has its
+# own set of issues: for instance, it requires installation of Twisted to build
+# an sdist (because the recommended mode of usage is to add it to
+# `setup_requires`). That in turn means that in order to successfully run tox
+# you have to have the python header files installed for whichever version of
+# python tox uses (which is python3 on recent ubuntus, for example).
+#
+# So, for now at least, we stick with what appears to be the convention among
+# Twisted projects, and don't attempt to do anything when someone runs
+# `setup.py test`; instead we direct people to run `trial` directly if they
+# care.
+#
+# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
+# [2]: https://pypi.python.org/pypi/setuptools_trial
+class TestCommand(Command):
+    user_options = []
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        print ("""Synapse's tests cannot be run via setup.py. To run them, try:
+    PYTHONPATH="." trial tests
+""")
+
 def read_file(path_segments):
     """Read a file from the package. Takes a list of strings to join to
     make the path"""
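With TestCommand registered (see the cmdclass change at the end of this file's diff), python setup.py test no longer tries to bootstrap tox: it just prints the notice above, and the suite is run directly with PYTHONPATH="." trial tests.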
@@ -39,38 +78,6 @@ def exec_file(path_segments):
     return result


-class Tox(Command):
-    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
-
-    def initialize_options(self):
-        self.tox_args = None
-
-    def finalize_options(self):
-        self.test_args = []
-        self.test_suite = True
-
-    def run(self):
-        #import here, cause outside the eggs aren't loaded
-        try:
-            import tox
-        except ImportError:
-            try:
-                self.distribution.fetch_build_eggs("tox")
-                import tox
-            except:
-                raise RuntimeError(
-                    "The tests need 'tox' to run. Please install 'tox'."
-                )
-        import shlex
-        args = self.tox_args
-        if args:
-            args = shlex.split(self.tox_args)
-        else:
-            args = []
-        errno = tox.cmdline(args=args)
-        sys.exit(errno)
-
-
 version = exec_file(("synapse", "__init__.py"))["__version__"]
 dependencies = exec_file(("synapse", "python_dependencies.py"))
 long_description = read_file(("README.rst",))
@@ -86,5 +93,5 @@ setup(
     zip_safe=False,
     long_description=long_description,
     scripts=["synctl"] + glob.glob("scripts/*"),
-    cmdclass={'test': Tox},
+    cmdclass={'test': TestCommand},
 )
synapse/__init__.py:
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.18.3"
+__version__ = "0.29.0"
synapse/api/auth.py:
@@ -16,18 +16,15 @@
 import logging

 import pymacaroons
-from canonicaljson import encode_canonical_json
-from signedjson.key import decode_verify_key_bytes
-from signedjson.sign import verify_signed_json, SignatureVerifyException
 from twisted.internet import defer
-from unpaddedbase64 import decode_base64

 import synapse.types
+from synapse import event_auth
 from synapse.api.constants import EventTypes, Membership, JoinRules
-from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
-from synapse.types import UserID, get_domain_from_id
-from synapse.util.logcontext import preserve_context_over_fn
-from synapse.util.logutils import log_function
+from synapse.api.errors import AuthError, Codes
+from synapse.types import UserID
+from synapse.util.caches import register_cache, CACHE_SIZE_FACTOR
+from synapse.util.caches.lrucache import LruCache
 from synapse.util.metrics import Measure

 logger = logging.getLogger(__name__)
@@ -39,6 +36,13 @@ AuthEventTypes = (
     EventTypes.ThirdPartyInvite,
 )

+# guests always get this device id.
+GUEST_DEVICE_ID = "guest_device"
+
+
+class _InvalidMacaroonException(Exception):
+    pass
+

 class Auth(object):
     """
@@ -51,17 +55,9 @@ class Auth(object):
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
         self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
-        # Docs for these currently lives at
-        # github.com/matrix-org/matrix-doc/blob/master/drafts/macaroons_caveats.rst
-        # In addition, we have type == delete_pusher which grants access only to
-        # delete pushers.
-        self._KNOWN_CAVEAT_PREFIXES = set([
-            "gen = ",
-            "guest = ",
-            "type = ",
-            "time < ",
-            "user_id = ",
-        ])
+        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
+        register_cache("token_cache", self.token_cache)

     @defer.inlineCallbacks
     def check_from_context(self, event, context, do_sig_check=True):
@@ -86,147 +82,7 @@ class Auth(object):
             True if the auth checks pass.
         """
         with Measure(self.clock, "auth.check"):
-            self.check_size_limits(event)
+            event_auth.check(event, auth_events, do_sig_check=do_sig_check)
-
-            if not hasattr(event, "room_id"):
-                raise AuthError(500, "Event has no room_id: %s" % event)
-
-            if do_sig_check:
-                sender_domain = get_domain_from_id(event.sender)
-                event_id_domain = get_domain_from_id(event.event_id)
-
-                is_invite_via_3pid = (
-                    event.type == EventTypes.Member
-                    and event.membership == Membership.INVITE
-                    and "third_party_invite" in event.content
-                )
-
-                # Check the sender's domain has signed the event
-                if not event.signatures.get(sender_domain):
-                    # We allow invites via 3pid to have a sender from a different
-                    # HS, as the sender must match the sender of the original
-                    # 3pid invite. This is checked further down with the
-                    # other dedicated membership checks.
-                    if not is_invite_via_3pid:
-                        raise AuthError(403, "Event not signed by sender's server")
-
-                # Check the event_id's domain has signed the event
-                if not event.signatures.get(event_id_domain):
-                    raise AuthError(403, "Event not signed by sending server")
-
-            if auth_events is None:
-                # Oh, we don't know what the state of the room was, so we
-                # are trusting that this is allowed (at least for now)
-                logger.warn("Trusting event: %s", event.event_id)
-                return True
-
-            if event.type == EventTypes.Create:
-                room_id_domain = get_domain_from_id(event.room_id)
-                if room_id_domain != sender_domain:
-                    raise AuthError(
-                        403,
-                        "Creation event's room_id domain does not match sender's"
-                    )
-                # FIXME
-                return True
-
-            creation_event = auth_events.get((EventTypes.Create, ""), None)
-
-            if not creation_event:
-                raise SynapseError(
-                    403,
-                    "Room %r does not exist" % (event.room_id,)
-                )
-
-            creating_domain = get_domain_from_id(event.room_id)
-            originating_domain = get_domain_from_id(event.sender)
-            if creating_domain != originating_domain:
-                if not self.can_federate(event, auth_events):
-                    raise AuthError(
-                        403,
-                        "This room has been marked as unfederatable."
-                    )
-
-            # FIXME: Temp hack
-            if event.type == EventTypes.Aliases:
-                if not event.is_state():
-                    raise AuthError(
-                        403,
-                        "Alias event must be a state event",
-                    )
-                if not event.state_key:
-                    raise AuthError(
-                        403,
-                        "Alias event must have non-empty state_key"
-                    )
-                sender_domain = get_domain_from_id(event.sender)
-                if event.state_key != sender_domain:
-                    raise AuthError(
-                        403,
-                        "Alias event's state_key does not match sender's domain"
-                    )
-                return True
-
-            logger.debug(
-                "Auth events: %s",
-                [a.event_id for a in auth_events.values()]
-            )
-
-            if event.type == EventTypes.Member:
-                allowed = self.is_membership_change_allowed(
-                    event, auth_events
-                )
-                if allowed:
-                    logger.debug("Allowing! %s", event)
-                else:
-                    logger.debug("Denying! %s", event)
-                return allowed
-
-            self.check_event_sender_in_room(event, auth_events)
-
-            # Special case to allow m.room.third_party_invite events wherever
-            # a user is allowed to issue invites. Fixes
-            # https://github.com/vector-im/vector-web/issues/1208 hopefully
-            if event.type == EventTypes.ThirdPartyInvite:
-                user_level = self._get_user_power_level(event.user_id, auth_events)
-                invite_level = self._get_named_level(auth_events, "invite", 0)
-
-                if user_level < invite_level:
-                    raise AuthError(
-                        403, (
-                            "You cannot issue a third party invite for %s." %
-                            (event.content.display_name,)
-                        )
-                    )
-                else:
-                    return True
-
-            self._can_send_event(event, auth_events)
-
-            if event.type == EventTypes.PowerLevels:
-                self._check_power_levels(event, auth_events)
-
-            if event.type == EventTypes.Redaction:
-                self.check_redaction(event, auth_events)
-
-            logger.debug("Allowing! %s", event)
-
-    def check_size_limits(self, event):
-        def too_big(field):
-            raise EventSizeError("%s too large" % (field,))
-
-        if len(event.user_id) > 255:
-            too_big("user_id")
-        if len(event.room_id) > 255:
-            too_big("room_id")
-        if event.is_state() and len(event.state_key) > 255:
-            too_big("state_key")
-        if len(event.type) > 255:
-            too_big("type")
-        if len(event.event_id) > 255:
-            too_big("event_id")
-        if len(encode_canonical_json(event.get_pdu_json())) > 65536:
-            too_big("event")
-
     @defer.inlineCallbacks
     def check_joined_room(self, room_id, user_id, current_state=None):
@@ -296,26 +152,8 @@ class Auth(object):
     @defer.inlineCallbacks
     def check_host_in_room(self, room_id, host):
         with Measure(self.clock, "check_host_in_room"):
-            latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+            latest_event_ids = yield self.store.is_host_joined(room_id, host)
+            defer.returnValue(latest_event_ids)
-
-            entry = yield self.state.resolve_state_groups(
-                room_id, latest_event_ids
-            )
-
-            ret = yield self.store.is_host_joined(
-                room_id, host, entry.state_group, entry.state
-            )
-            defer.returnValue(ret)
-
-    def check_event_sender_in_room(self, event, auth_events):
-        key = (EventTypes.Member, event.user_id, )
-        member_event = auth_events.get(key)
-
-        return self._check_joined_room(
-            member_event,
-            event.user_id,
-            event.room_id
-        )
-
     def _check_joined_room(self, member, user_id, room_id):
         if not member or member.membership != Membership.JOIN:
@@ -328,267 +166,8 @@ class Auth(object):

         return creation_event.content.get("m.federate", True) is True

-    @log_function
-    def is_membership_change_allowed(self, event, auth_events):
-        membership = event.content["membership"]
-
-        # Check if this is the room creator joining:
-        if len(event.prev_events) == 1 and Membership.JOIN == membership:
-            # Get room creation event:
-            key = (EventTypes.Create, "", )
-            create = auth_events.get(key)
-            if create and event.prev_events[0][0] == create.event_id:
-                if create.content["creator"] == event.state_key:
-                    return True
-
-        target_user_id = event.state_key
-
-        creating_domain = get_domain_from_id(event.room_id)
-        target_domain = get_domain_from_id(target_user_id)
-        if creating_domain != target_domain:
-            if not self.can_federate(event, auth_events):
-                raise AuthError(
-                    403,
-                    "This room has been marked as unfederatable."
-                )
-
-        # get info about the caller
-        key = (EventTypes.Member, event.user_id, )
-        caller = auth_events.get(key)
-
-        caller_in_room = caller and caller.membership == Membership.JOIN
-        caller_invited = caller and caller.membership == Membership.INVITE
-
-        # get info about the target
-        key = (EventTypes.Member, target_user_id, )
-        target = auth_events.get(key)
-
-        target_in_room = target and target.membership == Membership.JOIN
-        target_banned = target and target.membership == Membership.BAN
-
-        key = (EventTypes.JoinRules, "", )
-        join_rule_event = auth_events.get(key)
-        if join_rule_event:
-            join_rule = join_rule_event.content.get(
-                "join_rule", JoinRules.INVITE
-            )
-        else:
-            join_rule = JoinRules.INVITE
-
-        user_level = self._get_user_power_level(event.user_id, auth_events)
-        target_level = self._get_user_power_level(
-            target_user_id, auth_events
-        )
-
-        # FIXME (erikj): What should we do here as the default?
-        ban_level = self._get_named_level(auth_events, "ban", 50)
-
-        logger.debug(
-            "is_membership_change_allowed: %s",
-            {
-                "caller_in_room": caller_in_room,
-                "caller_invited": caller_invited,
-                "target_banned": target_banned,
-                "target_in_room": target_in_room,
-                "membership": membership,
-                "join_rule": join_rule,
-                "target_user_id": target_user_id,
-                "event.user_id": event.user_id,
-            }
-        )
-
-        if Membership.INVITE == membership and "third_party_invite" in event.content:
-            if not self._verify_third_party_invite(event, auth_events):
-                raise AuthError(403, "You are not invited to this room.")
-            if target_banned:
-                raise AuthError(
-                    403, "%s is banned from the room" % (target_user_id,)
-                )
-            return True
-
-        if Membership.JOIN != membership:
-            if (caller_invited
-                    and Membership.LEAVE == membership
-                    and target_user_id == event.user_id):
-                return True
-
-            if not caller_in_room:  # caller isn't joined
-                raise AuthError(
-                    403,
-                    "%s not in room %s." % (event.user_id, event.room_id,)
-                )
-
-        if Membership.INVITE == membership:
-            # TODO (erikj): We should probably handle this more intelligently
-            # PRIVATE join rules.
-
-            # Invites are valid iff caller is in the room and target isn't.
-            if target_banned:
-                raise AuthError(
-                    403, "%s is banned from the room" % (target_user_id,)
-                )
-            elif target_in_room:  # the target is already in the room.
-                raise AuthError(403, "%s is already in the room." %
-                                     target_user_id)
-            else:
-                invite_level = self._get_named_level(auth_events, "invite", 0)
-
-                if user_level < invite_level:
-                    raise AuthError(
-                        403, "You cannot invite user %s." % target_user_id
-                    )
-        elif Membership.JOIN == membership:
-            # Joins are valid iff caller == target and they were:
-            # invited: They are accepting the invitation
-            # joined: It's a NOOP
-            if event.user_id != target_user_id:
-                raise AuthError(403, "Cannot force another user to join.")
-            elif target_banned:
-                raise AuthError(403, "You are banned from this room")
-            elif join_rule == JoinRules.PUBLIC:
-                pass
-            elif join_rule == JoinRules.INVITE:
-                if not caller_in_room and not caller_invited:
-                    raise AuthError(403, "You are not invited to this room.")
-            else:
-                # TODO (erikj): may_join list
-                # TODO (erikj): private rooms
-                raise AuthError(403, "You are not allowed to join this room")
-        elif Membership.LEAVE == membership:
-            # TODO (erikj): Implement kicks.
-            if target_banned and user_level < ban_level:
-                raise AuthError(
-                    403, "You cannot unban user &s." % (target_user_id,)
-                )
-            elif target_user_id != event.user_id:
-                kick_level = self._get_named_level(auth_events, "kick", 50)
-
-                if user_level < kick_level or user_level <= target_level:
-                    raise AuthError(
-                        403, "You cannot kick user %s." % target_user_id
-                    )
-        elif Membership.BAN == membership:
-            if user_level < ban_level or user_level <= target_level:
-                raise AuthError(403, "You don't have permission to ban")
-        else:
-            raise AuthError(500, "Unknown membership %s" % membership)
-
-        return True
-
-    def _verify_third_party_invite(self, event, auth_events):
-        """
-        Validates that the invite event is authorized by a previous third-party invite.
-
-        Checks that the public key, and keyserver, match those in the third party invite,
-        and that the invite event has a signature issued using that public key.
-
-        Args:
-            event: The m.room.member join event being validated.
-            auth_events: All relevant previous context events which may be used
-                for authorization decisions.
-
-        Return:
-            True if the event fulfills the expectations of a previous third party
-            invite event.
-        """
-        if "third_party_invite" not in event.content:
-            return False
-        if "signed" not in event.content["third_party_invite"]:
-            return False
-        signed = event.content["third_party_invite"]["signed"]
-        for key in {"mxid", "token"}:
-            if key not in signed:
-                return False
-
-        token = signed["token"]
-
-        invite_event = auth_events.get(
-            (EventTypes.ThirdPartyInvite, token,)
-        )
-        if not invite_event:
-            return False
-
-        if invite_event.sender != event.sender:
-            return False
-
-        if event.user_id != invite_event.user_id:
-            return False
-
-        if signed["mxid"] != event.state_key:
-            return False
-        if signed["token"] != token:
-            return False
-
-        for public_key_object in self.get_public_keys(invite_event):
-            public_key = public_key_object["public_key"]
-            try:
-                for server, signature_block in signed["signatures"].items():
-                    for key_name, encoded_signature in signature_block.items():
-                        if not key_name.startswith("ed25519:"):
-                            continue
-                        verify_key = decode_verify_key_bytes(
-                            key_name,
-                            decode_base64(public_key)
-                        )
-                        verify_signed_json(signed, server, verify_key)
-
-                        # We got the public key from the invite, so we know that the
-                        # correct server signed the signed bundle.
-                        # The caller is responsible for checking that the signing
-                        # server has not revoked that public key.
-                        return True
-            except (KeyError, SignatureVerifyException,):
-                continue
-        return False
-
     def get_public_keys(self, invite_event):
-        public_keys = []
+        return event_auth.get_public_keys(invite_event)
-        if "public_key" in invite_event.content:
-            o = {
-                "public_key": invite_event.content["public_key"],
-            }
-            if "key_validity_url" in invite_event.content:
-                o["key_validity_url"] = invite_event.content["key_validity_url"]
-            public_keys.append(o)
-        public_keys.extend(invite_event.content.get("public_keys", []))
-        return public_keys
-
-    def _get_power_level_event(self, auth_events):
-        key = (EventTypes.PowerLevels, "", )
-        return auth_events.get(key)
-
-    def _get_user_power_level(self, user_id, auth_events):
-        power_level_event = self._get_power_level_event(auth_events)
-
-        if power_level_event:
-            level = power_level_event.content.get("users", {}).get(user_id)
-            if not level:
-                level = power_level_event.content.get("users_default", 0)
-
-            if level is None:
-                return 0
-            else:
-                return int(level)
-        else:
-            key = (EventTypes.Create, "", )
-            create_event = auth_events.get(key)
-            if (create_event is not None and
-                    create_event.content["creator"] == user_id):
-                return 100
-            else:
-                return 0
-
-    def _get_named_level(self, auth_events, name, default):
-        power_level_event = self._get_power_level_event(auth_events)
-
-        if not power_level_event:
-            return default
-
-        level = power_level_event.content.get(name, None)
-        if level is not None:
-            return int(level)
-        else:
-            return default
-
     @defer.inlineCallbacks
     def get_user_by_req(self, request, allow_guest=False, rights="access"):
@@ -625,13 +204,12 @@ class Auth(object):

         ip_addr = self.hs.get_ip_from_request(request)
         user_agent = request.requestHeaders.getRawHeaders(
-            "User-Agent",
+            b"User-Agent",
-            default=[""]
+            default=[b""]
         )[0]
         if user and access_token and ip_addr:
-            preserve_context_over_fn(
+            self.store.insert_client_ip(
-                self.store.insert_client_ip,
+                user_id=user.to_string(),
-                user=user,
                 access_token=access_token,
                 ip=ip_addr,
                 user_agent=user_agent,
@@ -685,50 +263,65 @@ class Auth(object):

     @defer.inlineCallbacks
     def get_user_by_access_token(self, token, rights="access"):
-        """ Get a registered user's ID.
+        """ Validate access token and get user_id from it

         Args:
             token (str): The access token to get the user by.
+            rights (str): The operation being performed; the access token must
+                allow this.
         Returns:
-            dict : dict that includes the user and the ID of their access token.
+            Deferred[dict]: dict that includes:
+               `user` (UserID)
+               `is_guest` (bool)
+               `token_id` (int|None): access token id. May be None if guest
+               `device_id` (str|None): device corresponding to access token
         Raises:
             AuthError if no user by that token exists or the token is invalid.
         """
         try:
-            ret = yield self.get_user_from_macaroon(token, rights)
+            user_id, guest = self._parse_and_validate_macaroon(token, rights)
-        except AuthError:
+        except _InvalidMacaroonException:
-            # TODO(daniel): Remove this fallback when all existing access tokens
-            # have been re-issued as macaroons.
-            if self.hs.config.expire_access_token:
-                raise
-            ret = yield self._look_up_user_by_access_token(token)
+            # doesn't look like a macaroon: treat it as an opaque token which
+            # must be in the database.
+            # TODO: it would be nice to get rid of this, but apparently some
+            # people use access tokens which aren't macaroons
+            r = yield self._look_up_user_by_access_token(token)
+            defer.returnValue(r)

-        defer.returnValue(ret)
-
-    @defer.inlineCallbacks
-    def get_user_from_macaroon(self, macaroon_str, rights="access"):
         try:
-            macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
-
-            user_id = self.get_user_id_from_macaroon(macaroon)
             user = UserID.from_string(user_id)

-            self.validate_macaroon(
-                macaroon, rights, self.hs.config.expire_access_token,
-                user_id=user_id,
-            )
-
-            guest = False
-            for caveat in macaroon.caveats:
-                if caveat.caveat_id == "guest = true":
-                    guest = True
-
             if guest:
+                # Guest access tokens are not stored in the database (there can
+                # only be one access token per guest, anyway).
+                #
+                # In order to prevent guest access tokens being used as regular
+                # user access tokens (and hence getting around the invalidation
+                # process), we look up the user id and check that it is indeed
+                # a guest user.
+                #
+                # It would of course be much easier to store guest access
+                # tokens in the database as well, but that would break existing
+                # guest tokens.
+                stored_user = yield self.store.get_user_by_id(user_id)
+                if not stored_user:
+                    raise AuthError(
+                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                        "Unknown user_id %s" % user_id,
+                        errcode=Codes.UNKNOWN_TOKEN
+                    )
+                if not stored_user["is_guest"]:
+                    raise AuthError(
+                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                        "Guest access token used for regular user",
+                        errcode=Codes.UNKNOWN_TOKEN
+                    )
                 ret = {
                     "user": user,
                     "is_guest": True,
                     "token_id": None,
-                    "device_id": None,
+                    # all guests get the same device id
+                    "device_id": GUEST_DEVICE_ID,
                 }
             elif rights == "delete_pusher":
                 # We don't store these tokens in the database
@@ -750,7 +343,7 @@ class Auth(object):
             # macaroon. They probably should be.
             # TODO: build the dictionary from the macaroon once the
             # above are fixed
-            ret = yield self._look_up_user_by_access_token(macaroon_str)
+            ret = yield self._look_up_user_by_access_token(token)
             if ret["user"] != user:
                 logger.error(
                     "Macaroon user (%s) != DB user (%s)",
@@ -769,6 +362,55 @@ class Auth(object):
                 errcode=Codes.UNKNOWN_TOKEN
             )

+    def _parse_and_validate_macaroon(self, token, rights="access"):
+        """Takes a macaroon and tries to parse and validate it. This is cached
+        if and only if rights == access and there isn't an expiry.
+
+        On invalid macaroon raises _InvalidMacaroonException
+
+        Returns:
+            (user_id, is_guest)
+        """
+        if rights == "access":
+            cached = self.token_cache.get(token, None)
+            if cached:
+                return cached
+
+        try:
+            macaroon = pymacaroons.Macaroon.deserialize(token)
+        except Exception:  # deserialize can throw more-or-less anything
+            # doesn't look like a macaroon: treat it as an opaque token which
+            # must be in the database.
+            # TODO: it would be nice to get rid of this, but apparently some
+            # people use access tokens which aren't macaroons
+            raise _InvalidMacaroonException()
+
+        try:
+            user_id = self.get_user_id_from_macaroon(macaroon)
+
+            has_expiry = False
+            guest = False
+            for caveat in macaroon.caveats:
+                if caveat.caveat_id.startswith("time "):
+                    has_expiry = True
+                elif caveat.caveat_id == "guest = true":
+                    guest = True
+
+            self.validate_macaroon(
+                macaroon, rights, self.hs.config.expire_access_token,
+                user_id=user_id,
+            )
+        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
+                errcode=Codes.UNKNOWN_TOKEN
+            )
+
+        if not has_expiry and rights == "access":
+            self.token_cache[token] = (user_id, guest)
+
+        return user_id, guest
+
     def get_user_id_from_macaroon(self, macaroon):
         """Retrieve the user_id given by the caveats on the macaroon.

@@ -798,27 +440,38 @@ class Auth(object):

         Args:
             macaroon(pymacaroons.Macaroon): The macaroon to validate
-            type_string(str): The kind of token required (e.g. "access", "refresh",
+            type_string(str): The kind of token required (e.g. "access",
                 "delete_pusher")
             verify_expiry(bool): Whether to verify whether the macaroon has expired.
-                This should really always be True, but no clients currently implement
-                token refresh, so we can't enforce expiry yet.
             user_id (str): The user_id required
         """
         v = pymacaroons.Verifier()
+
+        # the verifier runs a test for every caveat on the macaroon, to check
+        # that it is met for the current request. Each caveat must match at
+        # least one of the predicates specified by satisfy_exact or
+        # specify_general.
         v.satisfy_exact("gen = 1")
         v.satisfy_exact("type = " + type_string)
         v.satisfy_exact("user_id = %s" % user_id)
         v.satisfy_exact("guest = true")
+
+        # verify_expiry should really always be True, but there exist access
+        # tokens in the wild which expire when they should not, so we can't
+        # enforce expiry yet (so we have to allow any caveat starting with
+        # 'time < ' in access tokens).
+        #
+        # On the other hand, short-term login tokens (as used by CAS login, for
+        # example) have an expiry time which we do want to enforce.
+
         if verify_expiry:
             v.satisfy_general(self._verify_expiry)
         else:
             v.satisfy_general(lambda c: c.startswith("time < "))

-        v.verify(macaroon, self.hs.config.macaroon_secret_key)
+        # access_tokens include a nonce for uniqueness: any value is acceptable
+        v.satisfy_general(lambda c: c.startswith("nonce = "))
-
-        v = pymacaroons.Verifier()
-        v.satisfy_general(self._verify_recognizes_caveats)
         v.verify(macaroon, self.hs.config.macaroon_secret_key)

     def _verify_expiry(self, caveat):
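For context, here is a minimal, self-contained sketch of the pymacaroons flow that validate_macaroon() above builds on, using the same Verifier API. The secret key, user id and nonce are made-up placeholders, not values from this codebase:

    import pymacaroons

    key = "macaroon-secret-key"  # placeholder; Synapse uses hs.config.macaroon_secret_key

    # Minting: each first-party caveat is a plain string predicate.
    m = pymacaroons.Macaroon(location="example.org", identifier="key0", key=key)
    m.add_first_party_caveat("gen = 1")
    m.add_first_party_caveat("type = access")
    m.add_first_party_caveat("user_id = @alice:example.org")
    m.add_first_party_caveat("nonce = AbCdEf123")
    token = m.serialize()  # this string is what a client would present as a token

    # Verification mirrors the Verifier setup in validate_macaroon(): every
    # caveat on the macaroon must be satisfied by at least one predicate.
    v = pymacaroons.Verifier()
    v.satisfy_exact("gen = 1")
    v.satisfy_exact("type = access")
    v.satisfy_exact("user_id = @alice:example.org")
    v.satisfy_exact("guest = true")  # permitted but absent on this macaroon
    v.satisfy_general(lambda c: c.startswith("time < "))
    v.satisfy_general(lambda c: c.startswith("nonce = "))
    v.verify(pymacaroons.Macaroon.deserialize(token), key)  # raises on failure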
@@ -829,15 +482,6 @@ class Auth(object):
         now = self.hs.get_clock().time_msec()
         return now < expiry

-    def _verify_recognizes_caveats(self, caveat):
-        first_space = caveat.find(" ")
-        if first_space < 0:
-            return False
-        second_space = caveat.find(" ", first_space + 1)
-        if second_space < 0:
-            return False
-        return caveat[:second_space + 1] in self._KNOWN_CAVEAT_PREFIXES
-
     @defer.inlineCallbacks
     def _look_up_user_by_access_token(self, token):
         ret = yield self.store.get_user_by_access_token(token)
@@ -879,6 +523,14 @@ class Auth(object):
         )

     def is_server_admin(self, user):
+        """ Check if the given user is a local server admin.
+
+        Args:
+            user (str): mxid of user to check
+
+        Returns:
+            bool: True if the user is an admin
+        """
         return self.store.is_server_admin(user)

     @defer.inlineCallbacks
@@ -957,56 +609,6 @@ class Auth(object):

         defer.returnValue(auth_ids)

-    def _get_send_level(self, etype, state_key, auth_events):
-        key = (EventTypes.PowerLevels, "", )
-        send_level_event = auth_events.get(key)
-        send_level = None
-        if send_level_event:
-            send_level = send_level_event.content.get("events", {}).get(
-                etype
-            )
-            if send_level is None:
-                if state_key is not None:
-                    send_level = send_level_event.content.get(
-                        "state_default", 50
-                    )
-                else:
-                    send_level = send_level_event.content.get(
-                        "events_default", 0
-                    )
-
-        if send_level:
-            send_level = int(send_level)
-        else:
-            send_level = 0
-
-        return send_level
-
-    @log_function
-    def _can_send_event(self, event, auth_events):
-        send_level = self._get_send_level(
-            event.type, event.get("state_key", None), auth_events
-        )
-        user_level = self._get_user_power_level(event.user_id, auth_events)
-
-        if user_level < send_level:
-            raise AuthError(
-                403,
-                "You don't have permission to post that to the room. " +
-                "user_level (%d) < send_level (%d)" % (user_level, send_level)
-            )
-
-        # Check state_key
-        if hasattr(event, "state_key"):
-            if event.state_key.startswith("@"):
-                if event.state_key != event.user_id:
-                    raise AuthError(
-                        403,
-                        "You are not allowed to set others state"
-                    )
-
-        return True
-
     def check_redaction(self, event, auth_events):
         """Check whether the event sender is allowed to redact the target event.

@@ -1020,107 +622,7 @@ class Auth(object):
         AuthError if the event sender is definitely not allowed to redact
         the target event.
         """
-        user_level = self._get_user_power_level(event.user_id, auth_events)
+        return event_auth.check_redaction(event, auth_events)
-
-        redact_level = self._get_named_level(auth_events, "redact", 50)
-
-        if user_level >= redact_level:
-            return False
-
-        redacter_domain = get_domain_from_id(event.event_id)
-        redactee_domain = get_domain_from_id(event.redacts)
-        if redacter_domain == redactee_domain:
-            return True
-
-        raise AuthError(
-            403,
-            "You don't have permission to redact events"
-        )
-
-    def _check_power_levels(self, event, auth_events):
-        user_list = event.content.get("users", {})
-        # Validate users
-        for k, v in user_list.items():
-            try:
-                UserID.from_string(k)
-            except:
-                raise SynapseError(400, "Not a valid user_id: %s" % (k,))
-
-            try:
-                int(v)
-            except:
-                raise SynapseError(400, "Not a valid power level: %s" % (v,))
-
-        key = (event.type, event.state_key, )
-        current_state = auth_events.get(key)
-
-        if not current_state:
-            return
-
-        user_level = self._get_user_power_level(event.user_id, auth_events)
-
-        # Check other levels:
-        levels_to_check = [
-            ("users_default", None),
-            ("events_default", None),
-            ("state_default", None),
-            ("ban", None),
-            ("redact", None),
-            ("kick", None),
-            ("invite", None),
-        ]
-
-        old_list = current_state.content.get("users")
-        for user in set(old_list.keys() + user_list.keys()):
-            levels_to_check.append(
-                (user, "users")
-            )
-
-        old_list = current_state.content.get("events")
-        new_list = event.content.get("events")
-        for ev_id in set(old_list.keys() + new_list.keys()):
-            levels_to_check.append(
-                (ev_id, "events")
-            )
-
-        old_state = current_state.content
-        new_state = event.content
-
-        for level_to_check, dir in levels_to_check:
-            old_loc = old_state
-            new_loc = new_state
-            if dir:
-                old_loc = old_loc.get(dir, {})
-                new_loc = new_loc.get(dir, {})
-
-            if level_to_check in old_loc:
-                old_level = int(old_loc[level_to_check])
-            else:
-                old_level = None
-
-            if level_to_check in new_loc:
-                new_level = int(new_loc[level_to_check])
-            else:
-                new_level = None
-
-            if new_level is not None and old_level is not None:
-                if new_level == old_level:
-                    continue
-
-            if dir == "users" and level_to_check != event.user_id:
-                if old_level == user_level:
-                    raise AuthError(
-                        403,
-                        "You don't have permission to remove ops level equal "
-                        "to your own"
-                    )
-
-            if old_level > user_level or new_level > user_level:
-                raise AuthError(
-                    403,
-                    "You don't have permission to add ops level greater "
-                    "than your own"
-                )
-
     @defer.inlineCallbacks
     def check_can_change_room_list(self, room_id, user):
@@ -1150,10 +652,10 @@ class Auth(object):
         if power_level_event:
             auth_events[(EventTypes.PowerLevels, "")] = power_level_event

-        send_level = self._get_send_level(
+        send_level = event_auth.get_send_level(
             EventTypes.Aliases, "", auth_events
         )
-        user_level = self._get_user_power_level(user_id, auth_events)
+        user_level = event_auth.get_user_power_level(user_id, auth_events)

         if user_level < send_level:
             raise AuthError(
@@ -1170,7 +672,7 @@ def has_access_token(request):
        bool: False if no access_token was given, True otherwise.
    """
    query_params = request.args.get("access_token")
-   auth_headers = request.requestHeaders.getRawHeaders("Authorization")
+   auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
    return bool(query_params) or bool(auth_headers)


@@ -1190,8 +692,8 @@ def get_access_token_from_request(request, token_not_found_http_status=401):
         AuthError: If there isn't an access_token in the request.
     """

-    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
+    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
-    query_params = request.args.get("access_token")
+    query_params = request.args.get(b"access_token")
     if auth_headers:
         # Try the get the access_token from a "Authorization: Bearer"
         # header
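A side note on the b"User-Agent", b"Authorization" and b"access_token" changes scattered through this file: Twisted keys request headers and request.args by byte strings, so switching these lookups to bytes literals is groundwork for running Synapse under Python 3.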
synapse/api/constants.py:
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,6 +16,9 @@

 """Contains constants from the specification."""

+# the "depth" field on events is limited to 2**63 - 1
+MAX_DEPTH = 2**63 - 1
+

 class Membership(object):

@@ -43,10 +47,8 @@ class JoinRules(object):

 class LoginType(object):
     PASSWORD = u"m.login.password"
-    OAUTH = u"m.login.oauth2"
-    EMAIL_CODE = u"m.login.email.code"
-    EMAIL_URL = u"m.login.email.url"
     EMAIL_IDENTITY = u"m.login.email.identity"
+    MSISDN = u"m.login.msisdn"
     RECAPTCHA = u"m.login.recaptcha"
     DUMMY = u"m.login.dummy"

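(For reference: the new MAX_DEPTH value, 2**63 - 1 = 9223372036854775807, is the largest signed 64-bit integer.)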
synapse/api/errors.py:
@@ -17,6 +17,9 @@

 import logging

+import simplejson as json
+from six import iteritems
+
 logger = logging.getLogger(__name__)


@@ -39,37 +42,58 @@ class Codes(object):
     CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
     CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
     MISSING_PARAM = "M_MISSING_PARAM"
+    INVALID_PARAM = "M_INVALID_PARAM"
     TOO_LARGE = "M_TOO_LARGE"
     EXCLUSIVE = "M_EXCLUSIVE"
     THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
     THREEPID_IN_USE = "M_THREEPID_IN_USE"
     THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
+    THREEPID_DENIED = "M_THREEPID_DENIED"
     INVALID_USERNAME = "M_INVALID_USERNAME"
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


 class CodeMessageException(RuntimeError):
-    """An exception with integer code and message string attributes."""
+    """An exception with integer code and message string attributes.
+
+    Attributes:
+        code (int): HTTP error code
+        msg (str): string describing the error
+    """
     def __init__(self, code, msg):
         super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
         self.code = code
         self.msg = msg
-        self.response_code_message = None

     def error_dict(self):
         return cs_error(self.msg)


+class MatrixCodeMessageException(CodeMessageException):
+    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
+
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
+    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
+        super(MatrixCodeMessageException, self).__init__(code, msg)
+        self.errcode = errcode
+
+
 class SynapseError(CodeMessageException):
-    """A base error which can be caught for all synapse events."""
+    """A base exception type for matrix errors which have an errcode and error
+    message (as well as an HTTP status code).
+
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
     def __init__(self, code, msg, errcode=Codes.UNKNOWN):
         """Constructs a synapse error.

         Args:
             code (int): The integer error code (an HTTP response code)
             msg (str): The human-readable error message.
-            err (str): The error code e.g 'M_FORBIDDEN'
+            errcode (str): The matrix error code e.g 'M_FORBIDDEN'
         """
         super(SynapseError, self).__init__(code, msg)
         self.errcode = errcode
@@ -80,12 +104,87 @@ class SynapseError(CodeMessageException):
             self.errcode,
         )

+    @classmethod
+    def from_http_response_exception(cls, err):
+        """Make a SynapseError based on an HTTPResponseException
+
+        This is useful when a proxied request has failed, and we need to
+        decide how to map the failure onto a matrix error to send back to the
+        client.
+
+        An attempt is made to parse the body of the http response as a matrix
+        error. If that succeeds, the errcode and error message from the body
+        are used as the errcode and error message in the new synapse error.
+
+        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
+        set to the reason code from the HTTP response.
+
+        Args:
+            err (HttpResponseException):
+
+        Returns:
+            SynapseError:
+        """
+        # try to parse the body as json, to get better errcode/msg, but
+        # default to M_UNKNOWN with the HTTP status as the error text
+        try:
+            j = json.loads(err.response)
+        except ValueError:
+            j = {}
+        errcode = j.get('errcode', Codes.UNKNOWN)
|
errcode = j.get('errcode', Codes.UNKNOWN)
|
||||||
|
errmsg = j.get('error', err.msg)
|
||||||
|
|
||||||
|
res = SynapseError(err.code, errmsg, errcode)
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
class RegistrationError(SynapseError):
|
class RegistrationError(SynapseError):
|
||||||
"""An error raised when a registration event fails."""
|
"""An error raised when a registration event fails."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class FederationDeniedError(SynapseError):
|
||||||
|
"""An error raised when the server tries to federate with a server which
|
||||||
|
is not on its federation whitelist.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
destination (str): The destination which has been denied
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, destination):
|
||||||
|
"""Raised by federation client or server to indicate that we are
|
||||||
|
are deliberately not attempting to contact a given server because it is
|
||||||
|
not on our federation whitelist.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destination (str): the domain in question
|
||||||
|
"""
|
||||||
|
|
||||||
|
self.destination = destination
|
||||||
|
|
||||||
|
super(FederationDeniedError, self).__init__(
|
||||||
|
code=403,
|
||||||
|
msg="Federation denied with %s." % (self.destination,),
|
||||||
|
errcode=Codes.FORBIDDEN,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class InteractiveAuthIncompleteError(Exception):
|
||||||
|
"""An error raised when UI auth is not yet complete
|
||||||
|
|
||||||
|
(This indicates we should return a 401 with 'result' as the body)
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
result (dict): the server response to the request, which should be
|
||||||
|
passed back to the client
|
||||||
|
"""
|
||||||
|
def __init__(self, result):
|
||||||
|
super(InteractiveAuthIncompleteError, self).__init__(
|
||||||
|
"Interactive auth not yet complete",
|
||||||
|
)
|
||||||
|
self.result = result
|
||||||
|
|
||||||
|
|
||||||
class UnrecognizedRequestError(SynapseError):
|
class UnrecognizedRequestError(SynapseError):
|
||||||
"""An error indicating we don't understand the request you're trying to make"""
|
"""An error indicating we don't understand the request you're trying to make"""
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
@@ -105,13 +204,11 @@ class UnrecognizedRequestError(SynapseError):
|
|||||||
|
|
||||||
class NotFoundError(SynapseError):
|
class NotFoundError(SynapseError):
|
||||||
"""An error indicating we can't find the thing you asked for"""
|
"""An error indicating we can't find the thing you asked for"""
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND):
|
||||||
if "errcode" not in kwargs:
|
|
||||||
kwargs["errcode"] = Codes.NOT_FOUND
|
|
||||||
super(NotFoundError, self).__init__(
|
super(NotFoundError, self).__init__(
|
||||||
404,
|
404,
|
||||||
"Not found",
|
msg,
|
||||||
**kwargs
|
errcode=errcode
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -172,7 +269,6 @@ class LimitExceededError(SynapseError):
|
|||||||
errcode=Codes.LIMIT_EXCEEDED):
|
errcode=Codes.LIMIT_EXCEEDED):
|
||||||
super(LimitExceededError, self).__init__(code, msg, errcode)
|
super(LimitExceededError, self).__init__(code, msg, errcode)
|
||||||
self.retry_after_ms = retry_after_ms
|
self.retry_after_ms = retry_after_ms
|
||||||
self.response_code_message = "Too Many Requests"
|
|
||||||
|
|
||||||
def error_dict(self):
|
def error_dict(self):
|
||||||
return cs_error(
|
return cs_error(
|
||||||
@@ -202,7 +298,7 @@ def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
|
|||||||
A dict representing the error response JSON.
|
A dict representing the error response JSON.
|
||||||
"""
|
"""
|
||||||
err = {"error": msg, "errcode": code}
|
err = {"error": msg, "errcode": code}
|
||||||
for key, value in kwargs.iteritems():
|
for key, value in iteritems(kwargs):
|
||||||
err[key] = value
|
err[key] = value
|
||||||
return err
|
return err
|
||||||
|
|
||||||
@@ -242,6 +338,19 @@ class FederationError(RuntimeError):
|
|||||||
|
|
||||||
|
|
||||||
class HttpResponseException(CodeMessageException):
|
class HttpResponseException(CodeMessageException):
|
||||||
|
"""
|
||||||
|
Represents an HTTP-level failure of an outbound request
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
response (str): body of response
|
||||||
|
"""
|
||||||
def __init__(self, code, msg, response):
|
def __init__(self, code, msg, response):
|
||||||
self.response = response
|
"""
|
||||||
|
|
||||||
|
Args:
|
||||||
|
code (int): HTTP status code
|
||||||
|
msg (str): reason phrase from HTTP response status line
|
||||||
|
response (str): body of response
|
||||||
|
"""
|
||||||
super(HttpResponseException, self).__init__(code, msg)
|
super(HttpResponseException, self).__init__(code, msg)
|
||||||
|
self.response = response
|
||||||
|
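The new `SynapseError.from_http_response_exception` classmethod gives proxying code one place to translate an outbound HTTP failure into a client-facing matrix error. A usage sketch under the definitions above (the `proxied_get` wrapper is hypothetical, not part of this diff):

```python
from twisted.internet import defer

from synapse.api.errors import HttpResponseException, SynapseError


@defer.inlineCallbacks
def proxied_get(http_client, url):
    """Hypothetical wrapper: forward a request, re-raising HTTP-level
    failures as SynapseErrors that carry the remote errcode if possible."""
    try:
        result = yield http_client.get_json(url)
    except HttpResponseException as e:
        # Parses e.response as a matrix error body when it can; otherwise
        # falls back to M_UNKNOWN with the HTTP reason phrase as the message.
        raise SynapseError.from_http_response_exception(e)
    defer.returnValue(result)
```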
```diff
@@ -13,11 +13,174 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.api.errors import SynapseError
+from synapse.storage.presence import UserPresenceState
 from synapse.types import UserID, RoomID

 from twisted.internet import defer

-import ujson as json
+import simplejson as json
+import jsonschema
+from jsonschema import FormatChecker
+
+FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "limit": {
+            "type": "number"
+        },
+        "senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "not_senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        # TODO: We don't limit event type values but we probably should...
+        # check types are valid event types
+        "types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "not_types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        }
+    }
+}
+
+ROOM_FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "not_rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "ephemeral": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "include_leave": {
+            "type": "boolean"
+        },
+        "state": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "timeline": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "account_data": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+    }
+}
+
+ROOM_EVENT_FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "limit": {
+            "type": "number"
+        },
+        "senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "not_senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "not_types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "not_rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "contains_url": {
+            "type": "boolean"
+        }
+    }
+}
+
+USER_ID_ARRAY_SCHEMA = {
+    "type": "array",
+    "items": {
+        "type": "string",
+        "format": "matrix_user_id"
+    }
+}
+
+ROOM_ID_ARRAY_SCHEMA = {
+    "type": "array",
+    "items": {
+        "type": "string",
+        "format": "matrix_room_id"
+    }
+}
+
+USER_FILTER_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "description": "schema for a Sync filter",
+    "type": "object",
+    "definitions": {
+        "room_id_array": ROOM_ID_ARRAY_SCHEMA,
+        "user_id_array": USER_ID_ARRAY_SCHEMA,
+        "filter": FILTER_SCHEMA,
+        "room_filter": ROOM_FILTER_SCHEMA,
+        "room_event_filter": ROOM_EVENT_FILTER_SCHEMA
+    },
+    "properties": {
+        "presence": {
+            "$ref": "#/definitions/filter"
+        },
+        "account_data": {
+            "$ref": "#/definitions/filter"
+        },
+        "room": {
+            "$ref": "#/definitions/room_filter"
+        },
+        "event_format": {
+            "type": "string",
+            "enum": ["client", "federation"]
+        },
+        "event_fields": {
+            "type": "array",
+            "items": {
+                "type": "string",
+                # Don't allow '\\' in event field filters. This makes matching
+                # events a lot easier as we can then use a negative lookbehind
+                # assertion to split '\.' If we allowed \\ then it would
+                # incorrectly split '\\.' See synapse.events.utils.serialize_event
+                "pattern": "^((?!\\\).)*$"
+            }
+        }
+    },
+    "additionalProperties": False
+}
+
+
+@FormatChecker.cls_checks('matrix_room_id')
+def matrix_room_id_validator(room_id_str):
+    return RoomID.from_string(room_id_str)
+
+
+@FormatChecker.cls_checks('matrix_user_id')
+def matrix_user_id_validator(user_id_str):
+    return UserID.from_string(user_id_str)


 class Filtering(object):
@@ -52,83 +215,11 @@ class Filtering(object):
         # NB: Filters are the complete json blobs. "Definitions" are an
         # individual top-level key e.g. public_user_data. Filters are made of
         # many definitions.
-        top_level_definitions = [
-            "presence", "account_data"
-        ]
-
-        room_level_definitions = [
-            "state", "timeline", "ephemeral", "account_data"
-        ]
-
-        for key in top_level_definitions:
-            if key in user_filter_json:
-                self._check_definition(user_filter_json[key])
-
-        if "room" in user_filter_json:
-            self._check_definition_room_lists(user_filter_json["room"])
-            for key in room_level_definitions:
-                if key in user_filter_json["room"]:
-                    self._check_definition(user_filter_json["room"][key])
-
-    def _check_definition_room_lists(self, definition):
-        """Check that "rooms" and "not_rooms" are lists of room ids if they
-        are present
-
-        Args:
-            definition(dict): The filter definition
-        Raises:
-            SynapseError: If there was a problem with this definition.
-        """
-        # check rooms are valid room IDs
-        room_id_keys = ["rooms", "not_rooms"]
-        for key in room_id_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for room_id in definition[key]:
-                    RoomID.from_string(room_id)
-
-    def _check_definition(self, definition):
-        """Check if the provided definition is valid.
-
-        This inspects not only the types but also the values to make sure they
-        make sense.
-
-        Args:
-            definition(dict): The filter definition
-        Raises:
-            SynapseError: If there was a problem with this definition.
-        """
-        # NB: Filters are the complete json blobs. "Definitions" are an
-        # individual top-level key e.g. public_user_data. Filters are made of
-        # many definitions.
-        if type(definition) != dict:
-            raise SynapseError(
-                400, "Expected JSON object, not %s" % (definition,)
-            )
-
-        self._check_definition_room_lists(definition)
-
-        # check senders are valid user IDs
-        user_id_keys = ["senders", "not_senders"]
-        for key in user_id_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for user_id in definition[key]:
-                    UserID.from_string(user_id)
-
-        # TODO: We don't limit event type values but we probably should...
-        # check types are valid event types
-        event_keys = ["types", "not_types"]
-        for key in event_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for event_type in definition[key]:
-                    if not isinstance(event_type, basestring):
-                        raise SynapseError(400, "Event type should be a string")
+        try:
+            jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
+                                format_checker=FormatChecker())
+        except jsonschema.ValidationError as e:
+            raise SynapseError(400, e.message)
@@ -152,6 +243,7 @@ class FilterCollection(object):
         self.include_leave = filter_json.get("room", {}).get(
             "include_leave", False
         )
+        self.event_fields = filter_json.get("event_fields", [])

     def __repr__(self):
         return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
@@ -186,6 +278,26 @@ class FilterCollection(object):
     def filter_room_account_data(self, events):
         return self._room_account_data.filter(self._room_filter.filter(events))

+    def blocks_all_presence(self):
+        return (
+            self._presence_filter.filters_all_types() or
+            self._presence_filter.filters_all_senders()
+        )
+
+    def blocks_all_room_ephemeral(self):
+        return (
+            self._room_ephemeral_filter.filters_all_types() or
+            self._room_ephemeral_filter.filters_all_senders() or
+            self._room_ephemeral_filter.filters_all_rooms()
+        )
+
+    def blocks_all_room_timeline(self):
+        return (
+            self._room_timeline_filter.filters_all_types() or
+            self._room_timeline_filter.filters_all_senders() or
+            self._room_timeline_filter.filters_all_rooms()
+        )
+

 class Filter(object):
     def __init__(self, filter_json):
@@ -202,25 +314,50 @@ class Filter(object):

         self.contains_url = self.filter_json.get("contains_url", None)

+    def filters_all_types(self):
+        return "*" in self.not_types
+
+    def filters_all_senders(self):
+        return "*" in self.not_senders
+
+    def filters_all_rooms(self):
+        return "*" in self.not_rooms
+
     def check(self, event):
         """Checks whether the filter matches the given event.

         Returns:
             bool: True if the event matches
         """
-        sender = event.get("sender", None)
-        if not sender:
-            # Presence events have their 'sender' in content.user_id
-            content = event.get("content")
-            # account_data has been allowed to have non-dict content, so check type first
-            if isinstance(content, dict):
-                sender = content.get("user_id")
+        # We usually get the full "events" as dictionaries coming through,
+        # except for presence which actually gets passed around as its own
+        # namedtuple type.
+        if isinstance(event, UserPresenceState):
+            sender = event.user_id
+            room_id = None
+            ev_type = "m.presence"
+            is_url = False
+        else:
+            sender = event.get("sender", None)
+            if not sender:
+                # Presence events had their 'sender' in content.user_id, but are
+                # now handled above. We don't know if anything else uses this
+                # form. TODO: Check this and probably remove it.
+                content = event.get("content")
+                # account_data has been allowed to have non-dict content, so
+                # check type first
+                if isinstance(content, dict):
+                    sender = content.get("user_id")
+
+            room_id = event.get("room_id", None)
+            ev_type = event.get("type", None)
+            is_url = "url" in event.get("content", {})
+
         return self.check_fields(
-            event.get("room_id", None),
+            room_id,
             sender,
-            event.get("type", None),
-            "url" in event.get("content", {})
+            ev_type,
+            is_url,
         )

     def check_fields(self, room_id, sender, event_type, contains_url):
```
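The rewrite above replaces the hand-rolled type checks with a single `jsonschema.validate` call; the two `@FormatChecker.cls_checks` registrations are what make `"format": "matrix_user_id"` and `"matrix_room_id"` enforceable. A standalone sketch of the same pattern, using a toy schema rather than the real `USER_FILTER_SCHEMA`:

```python
import jsonschema
from jsonschema import FormatChecker


# Register a custom format: a falsy return (or an exception) marks the value
# invalid wherever the schema says "format": "matrix_user_id".
@FormatChecker.cls_checks("matrix_user_id")
def is_matrix_user_id(value):
    return isinstance(value, str) and value.startswith("@") and ":" in value


TOY_SCHEMA = {
    "type": "object",
    "properties": {
        "senders": {
            "type": "array",
            "items": {"type": "string", "format": "matrix_user_id"},
        },
    },
}

try:
    jsonschema.validate(
        {"senders": ["@alice:example.com", "bogus"]},
        TOY_SCHEMA,
        # Formats are only enforced when a checker instance is passed in.
        format_checker=FormatChecker(),
    )
except jsonschema.ValidationError as e:
    print("rejected:", e.message)  # 'bogus' fails the matrix_user_id format
```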
synapse/app/_base.py (new file, 178 lines):

```diff
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import logging
+import sys
+
+try:
+    import affinity
+except Exception:
+    affinity = None
+
+from daemonize import Daemonize
+from synapse.util import PreserveLoggingContext
+from synapse.util.rlimit import change_resource_limit
+from twisted.internet import error, reactor
+
+logger = logging.getLogger(__name__)
+
+
+def start_worker_reactor(appname, config):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor. Pulls configuration from the 'worker' settings in 'config'.
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        config (synapse.config.Config): config object
+    """
+
+    logger = logging.getLogger(config.worker_app)
+
+    start_reactor(
+        appname,
+        config.soft_file_limit,
+        config.gc_thresholds,
+        config.worker_pid_file,
+        config.worker_daemonize,
+        config.worker_cpu_affinity,
+        logger,
+    )
+
+
+def start_reactor(
+        appname,
+        soft_file_limit,
+        gc_thresholds,
+        pid_file,
+        daemonize,
+        cpu_affinity,
+        logger,
+):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        soft_file_limit (int):
+        gc_thresholds:
+        pid_file (str): name of pid file to write to if daemonize is True
+        daemonize (bool): true to run the reactor in a background process
+        cpu_affinity (int|None): cpu affinity mask
+        logger (logging.Logger): logger instance to pass to Daemonize
+    """
+
+    def run():
+        # make sure that we run the reactor with the sentinel log context,
+        # otherwise other PreserveLoggingContext instances will get confused
+        # and complain when they see the logcontext arbitrarily swapping
+        # between the sentinel and `run` logcontexts.
+        with PreserveLoggingContext():
+            logger.info("Running")
+            if cpu_affinity is not None:
+                if not affinity:
+                    quit_with_error(
+                        "Missing package 'affinity' required for cpu_affinity\n"
+                        "option\n\n"
+                        "Install by running:\n\n"
+                        "   pip install affinity\n\n"
+                    )
+                logger.info("Setting CPU affinity to %s" % cpu_affinity)
+                affinity.set_process_affinity_mask(0, cpu_affinity)
+            change_resource_limit(soft_file_limit)
+            if gc_thresholds:
+                gc.set_threshold(*gc_thresholds)
+            reactor.run()
+
+    if daemonize:
+        daemon = Daemonize(
+            app=appname,
+            pid=pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+def quit_with_error(error_string):
+    message_lines = error_string.split("\n")
+    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
+    sys.stderr.write("*" * line_length + '\n')
+    for line in message_lines:
+        sys.stderr.write(" %s\n" % (line.rstrip(),))
+    sys.stderr.write("*" * line_length + '\n')
+    sys.exit(1)
+
+
+def listen_tcp(bind_addresses, port, factory, backlog=50):
+    """
+    Create a TCP socket for a port and several addresses
+    """
+    for address in bind_addresses:
+        try:
+            reactor.listenTCP(
+                port,
+                factory,
+                backlog,
+                address
+            )
+        except error.CannotListenError as e:
+            check_bind_error(e, address, bind_addresses)
+
+
+def listen_ssl(bind_addresses, port, factory, context_factory, backlog=50):
+    """
+    Create an SSL socket for a port and several addresses
+    """
+    for address in bind_addresses:
+        try:
+            reactor.listenSSL(
+                port,
+                factory,
+                context_factory,
+                backlog,
+                address
+            )
+        except error.CannotListenError as e:
+            check_bind_error(e, address, bind_addresses)
+
+
+def check_bind_error(e, address, bind_addresses):
+    """
+    This method checks an exception occurred while binding on 0.0.0.0.
+    If :: is specified in the bind addresses a warning is shown.
+    The exception is still raised otherwise.
+
+    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
+    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
+    When binding on 0.0.0.0 after :: this can safely be ignored.
+
+    Args:
+        e (Exception): Exception that was caught.
+        address (str): Address on which binding was attempted.
+        bind_addresses (list): Addresses on which the service listens.
+    """
+    if address == '0.0.0.0' and '::' in bind_addresses:
+        logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
+    else:
+        raise e
```
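`listen_tcp` and `check_bind_error` encode a dual-stack subtlety: on Linux and macOS, binding the IPv6 wildcard `::` also accepts IPv4 (per RFC 3493), so a subsequent bind on `0.0.0.0` fails with `CannotListenError` and can safely be ignored. A usage sketch (the `Echo` factory is a stand-in for any Twisted protocol factory, not code from this file):

```python
from twisted.internet import protocol, reactor

from synapse.app._base import listen_tcp  # the helper defined above


class Echo(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)


# Bind the IPv6 wildcard first: it also covers IPv4, so the follow-up bind
# on 0.0.0.0 raises CannotListenError, which check_bind_error downgrades to
# a warning instead of aborting startup.
listen_tcp(["::", "0.0.0.0"], 8080, protocol.Factory.forProtocol(Echo))
reactor.run()
```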
```diff
@@ -13,35 +13,30 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
-from synapse.server import HomeServer
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string

 from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.appservice")

@@ -54,19 +49,6 @@ class AppserviceSlaveStore(


 class AppserviceServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
@@ -74,7 +56,7 @@ class AppserviceServer(HomeServer):

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -82,17 +64,20 @@ class AppserviceServer(HomeServer):
             if name == "metrics":
                 resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
-            ),
-            interface=bind_address
-        )
+                self.version_string,
+            )
+        )

         logger.info("Synapse appservice now listening on port %d", port)

@@ -100,42 +85,42 @@ class AppserviceServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                reactor.listenTCP(
+                _base.listen_tcp(
+                    listener["bind_addresses"],
                     listener["port"],
                     manhole(
                         username="matrix",
                         password="rabbithole",
                         globals={"hs": self},
-                    ),
-                    interface=listener.get("bind_address", '127.0.0.1')
+                    )
                 )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        appservice_handler = self.get_application_service_handler()
-
-        @defer.inlineCallbacks
-        def replicate(results):
-            stream = results.get("events")
-            if stream:
-                max_stream_id = stream["position"]
-                yield appservice_handler.notify_interested_services(max_stream_id)
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                replicate(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(30)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ASReplicationHandler(self)
+
+
+class ASReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(ASReplicationHandler, self).__init__(hs.get_datastore())
+        self.appservice_handler = hs.get_application_service_handler()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
+
+        if stream_name == "events":
+            max_stream_id = self.store.get_room_max_stream_ordering()
+            run_in_background(self._notify_app_services, max_stream_id)
+
+    @defer.inlineCallbacks
+    def _notify_app_services(self, room_stream_id):
+        try:
+            yield self.appservice_handler.notify_interested_services(room_stream_id)
+        except Exception:
+            logger.exception("Error notifying application services of event")


 def start(config_options):
@@ -149,7 +134,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.appservice"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts

     database_engine = create_engine(config.database_config)

@@ -176,33 +163,13 @@ def start(config_options):
     ps.setup()
     ps.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
-        ps.replicate()
         ps.get_datastore().start_profiling()
         ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-appservice",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-appservice", config)


 if __name__ == '__main__':
```
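The appservice worker's move from long-polled HTTP replication to `ReplicationClientHandler` is a pattern the other workers repeat: subclass the handler, override `on_rdata`, and hand any slow follow-up work to `run_in_background` so it never blocks the replication connection. A minimal sketch of a custom handler in that shape (the class and its work are hypothetical):

```python
import logging

from twisted.internet import defer

from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.util.logcontext import run_in_background

logger = logging.getLogger(__name__)


class MyWorkerReplicationHandler(ReplicationClientHandler):
    """Hypothetical worker handler following the ASReplicationHandler shape."""

    def __init__(self, hs):
        super(MyWorkerReplicationHandler, self).__init__(hs.get_datastore())

    def on_rdata(self, stream_name, token, rows):
        # Let the base class record the new stream position first.
        super(MyWorkerReplicationHandler, self).on_rdata(stream_name, token, rows)
        if stream_name == "events":
            # Fire and forget: a slow consumer must not stall replication.
            run_in_background(self._on_new_events, token)

    @defer.inlineCallbacks
    def _on_new_events(self, token):
        try:
            yield defer.succeed(None)  # stand-in for real async work
            logger.info("saw events up to %s", token)
        except Exception:
            logger.exception("error handling replicated events")
```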
```diff
@@ -13,43 +13,38 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.http.site import SynapseSite
+from synapse.crypto import context_factory
 from synapse.http.server import JsonResource
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.client.v1.room import PublicRoomListRestServlet
 from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.crypto import context_factory
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.internet import reactor
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.client_reader")

@@ -61,26 +56,14 @@ class ClientReaderSlavedStore(
     DirectoryStore,
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
+    TransactionStore,
+    SlavedClientIpStore,
     BaseSlavedStore,
-    ClientIpStore, # After BaseSlavedStore because the constructor is different
 ):
     pass


 class ClientReaderServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
@@ -88,7 +71,7 @@ class ClientReaderServer(HomeServer):

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -105,17 +88,20 @@ class ClientReaderServer(HomeServer):
                     "/_matrix/client/api/v1": resource,
                 })

-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
-            ),
-            interface=bind_address
-        )
+                self.version_string,
+            )
+        )

         logger.info("Synapse client reader now listening on port %d", port)

@@ -123,33 +109,23 @@ class ClientReaderServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                reactor.listenTCP(
+                _base.listen_tcp(
+                    listener["bind_addresses"],
                     listener["port"],
                     manhole(
                         username="matrix",
                         password="rabbithole",
                         globals={"hs": self},
-                    ),
-                    interface=listener.get("bind_address", '127.0.0.1')
+                    )
                 )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())


 def start(config_options):
@@ -163,7 +139,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.client_reader"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts

     database_engine = create_engine(config.database_config)

@@ -179,36 +157,15 @@ def start(config_options):
     )

     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
-        ss.replicate()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-client-reader",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-client-reader", config)


 if __name__ == '__main__':
```
|||||||
190
synapse/app/event_creator.py
Normal file
190
synapse/app/event_creator.py
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2018 New Vector Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import logging
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import synapse
|
||||||
|
from synapse import events
|
||||||
|
from synapse.app import _base
|
||||||
|
from synapse.config._base import ConfigError
|
||||||
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
|
from synapse.config.logger import setup_logging
|
||||||
|
from synapse.crypto import context_factory
|
||||||
|
from synapse.http.server import JsonResource
|
||||||
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
|
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||||
|
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||||
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
|
from synapse.replication.slave.storage.profile import SlavedProfileStore
|
||||||
|
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||||
|
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||||
|
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||||
|
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||||
|
from synapse.replication.slave.storage.room import RoomStore
|
||||||
|
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||||
|
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||||
|
from synapse.rest.client.v1.room import (
|
||||||
|
RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
|
||||||
|
JoinRoomAliasServlet,
|
||||||
|
)
|
||||||
|
from synapse.server import HomeServer
|
||||||
|
from synapse.storage.engines import create_engine
|
||||||
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext
|
||||||
|
from synapse.util.manhole import manhole
|
||||||
|
from synapse.util.versionstring import get_version_string
|
||||||
|
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.event_creator")


class EventCreatorSlavedStore(
    DirectoryStore,
    TransactionStore,
    SlavedProfileStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
    SlavedReceiptsStore,
    SlavedPushRuleStore,
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedEventStore,
    SlavedRegistrationStore,
    RoomStore,
    BaseSlavedStore,
):
    pass


class EventCreatorServer(HomeServer):
    def setup(self):
        logger.info("Setting up.")
        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    RoomSendEventRestServlet(self).register(resource)
                    RoomMembershipRestServlet(self).register(resource)
                    RoomStateEventRestServlet(self).register(resource)
                    JoinRoomAliasServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )

        logger.info("Synapse event creator now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse event creator", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.event_creator"

    assert config.worker_replication_http_port is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = EventCreatorServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-event-creator", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
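Note: `_listen_http` above only reads a handful of keys from each listener block. A minimal sketch of the worker listener configuration it expects, expressed as the dict the code receives; the port, address, and tag values are illustrative, not taken from this diff:

    # Illustrative only: the shape of a listener block as consumed by
    # _listen_http. Keys match the lookups in the code; values are made up.
    listener_config = {
        "type": "http",
        "port": 8009,                      # listener_config["port"]
        "bind_addresses": ["127.0.0.1"],   # listener_config["bind_addresses"]
        "tag": "client",                   # optional; defaults to the port
        "resources": [
            {"names": ["client"]},         # mounts the event-sending servlets
            {"names": ["metrics"]},        # mounts MetricsResource
        ],
    }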
synapse/app/federation_reader.py
@@ -13,41 +13,35 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
+from synapse.api.urls import FEDERATION_PREFIX
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import TransactionStore
-from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.api.urls import FEDERATION_PREFIX
-from synapse.federation.transport.server import TransportLayerServer
-from synapse.crypto import context_factory
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.internet import reactor
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.federation_reader")

@@ -64,19 +58,6 @@ class FederationReaderSlavedStore(


 class FederationReaderServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
@@ -84,7 +65,7 @@ class FederationReaderServer(HomeServer):

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -96,17 +77,20 @@ class FederationReaderServer(HomeServer):
                 FEDERATION_PREFIX: TransportLayerServer(self),
             })

-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-            ),
-            interface=bind_address
-        )
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )

         logger.info("Synapse federation reader now listening on port %d", port)

     def start_listening(self, listeners):
@@ -114,33 +98,22 @@ class FederationReaderServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                reactor.listenTCP(
-                    listener["port"],
-                    manhole(
-                        username="matrix",
-                        password="rabbithole",
-                        globals={"hs": self},
-                    ),
-                    interface=listener.get("bind_address", '127.0.0.1')
-                )
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())


 def start(config_options):
@@ -154,7 +127,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.federation_reader"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts

     database_engine = create_engine(config.database_config)

@@ -170,36 +145,15 @@ def start(config_options):
     )

     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
-        ss.replicate()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-federation-reader",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-federation-reader", config)


 if __name__ == '__main__':
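Note: this hunk shows the worker replication rewrite that recurs throughout the comparison. The old `replicate()` coroutine long-polled `worker_replication_url` over HTTP and backed off with `yield sleep(5)` on errors; the new code builds a `ReplicationClientHandler` in `build_tcp_replication` and calls `start_replication(self)` once the listeners are up. A worker that wants to react to stream updates subclasses the handler, as `federation_sender.py` does below. A minimal sketch of that pattern; the logging body is illustrative, but the override point is the one this diff actually uses:

    class LoggingReplicationHandler(ReplicationClientHandler):
        # Illustrative subclass: observe each replicated batch, then defer
        # to the stock behaviour, mirroring FederationSenderReplicationHandler.
        def on_rdata(self, stream_name, token, rows):
            logger.info("%s: %d rows, token now %s", stream_name, len(rows), token)
            super(LoggingReplicationHandler, self).on_rdata(stream_name, token, rows)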
synapse/app/federation_sender.py (new file, 261 lines)
@@ -0,0 +1,261 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.federation_sender")


class FederationSenderSlaveStore(
    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
):
    def __init__(self, db_conn, hs):
        super(FederationSenderSlaveStore, self).__init__(db_conn, hs)

        # We pull out the current federation stream position now so that we
        # always have a known value for the federation position in memory so
        # that we don't have to bounce via a deferred once when we start the
        # replication streams.
        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

    def _get_federation_out_pos(self, db_conn):
        sql = (
            "SELECT stream_id FROM federation_stream_position"
            " WHERE type = ?"
        )
        sql = self.database_engine.convert_param_style(sql)

        txn = db_conn.cursor()
        txn.execute(sql, ("federation",))
        rows = txn.fetchall()
        txn.close()

        return rows[0][0] if rows else -1


class FederationSenderServer(HomeServer):
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )

        logger.info("Synapse federation_sender now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return FederationSenderReplicationHandler(self)


class FederationSenderReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    def on_rdata(self, stream_name, token, rows):
        super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)

    def get_streams_to_replicate(self):
        args = super(FederationSenderReplicationHandler, self).get_streams_to_replicate()
        args.update(self.send_handler.stream_positions())
        return args


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation sender", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_sender"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.send_federation:
        sys.stderr.write(
            "\nThe send_federation must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``send_federation: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)
    _base.start_worker_reactor("synapse-federation-sender", config)


class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """
    def __init__(self, hs, replication_client):
        self.store = hs.get_datastore()
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def stream_positions(self):
        return {"federation": self.federation_position}

    def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            run_in_background(self.update_token, token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

    @defer.inlineCallbacks
    def update_token(self, token):
        try:
            self.federation_position = token

            # We linearize here to ensure we don't have races updating the token
            with (yield self._fed_position_linearizer.queue(None)):
                if self._last_ack < self.federation_position:
                    yield self.store.update_federation_out_pos(
                        "federation", self.federation_position
                    )

                    # We ACK this token over replication so that the master can drop
                    # its in memory queues
                    self.replication_client.send_federation_ack(self.federation_position)
                    self._last_ack = self.federation_position
        except Exception:
            logger.exception("Error updating federation stream position")


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
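Note: `update_token` is kicked off with `run_in_background` for every federation-stream batch, so several calls can be in flight at once; the `Linearizer` makes the store write and the replication ACK happen one at a time, and the `_last_ack` check skips writes another call has already overtaken. A minimal sketch of just the queueing pattern, where `linearizer` and `store` stand in for the attributes built in `__init__` above:

    @defer.inlineCallbacks
    def save_position(token):
        # Only one body runs at a time per key (None here), so a slow write
        # cannot interleave with a later call carrying a newer token.
        with (yield linearizer.queue(None)):
            yield store.update_federation_out_pos("federation", token)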
synapse/app/frontend_proxy.py (new file, 228 lines)
@@ -0,0 +1,228 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import (
    RestServlet, parse_json_object_from_request,
)
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.frontend_proxy")


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if (requester.device_id is not None and
                    device_id != requester.device_id):
                logger.warning("Client uploading keys for a different device "
                               "(logged in as %s, uploading for %s)",
                               requester.device_id, device_id)
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400,
                "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            # Pass through the auth headers, if any, in case the access token
            # is there.
            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
            headers = {
                "Authorization": auth_headers,
            }
            result = yield self.http_client.post_json_get_json(
                self.main_uri + request.uri,
                body,
                headers=headers,
            )

            defer.returnValue((200, result))
        else:
            # Just interested in counts.
            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
            defer.returnValue((200, {"one_time_key_counts": result}))


class FrontendProxySlavedStore(
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
):
    pass


class FrontendProxyServer(HomeServer):
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse frontend proxy", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.frontend_proxy"

    assert config.worker_main_http_uri is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-frontend-proxy", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
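Note: `KeyUploadServlet` serves one endpoint but splits it two ways: an empty body is a query for one-time-key counts and is answered from the worker's replicated store, while a non-empty body is a real upload and is re-posted verbatim to the main process at `worker_main_http_uri`, with the caller's `Authorization` header carried across so the main process can authenticate it. A condensed sketch of that decision; `http_client`, `main_uri`, and `store` stand in for the attributes set up in `__init__` above:

    @defer.inlineCallbacks
    def route_key_upload(request, body, user_id, device_id):
        # Condensed restatement of KeyUploadServlet.on_POST: proxy real
        # uploads to the main process, answer count-only queries locally.
        if body:
            headers = {
                "Authorization": request.requestHeaders.getRawHeaders(b"Authorization", []),
            }
            result = yield http_client.post_json_get_json(
                main_uri + request.uri, body, headers=headers,
            )
            defer.returnValue((200, result))
        counts = yield store.count_e2e_one_time_keys(user_id, device_id)
        defer.returnValue((200, {"one_time_key_counts": counts}))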
synapse/app/homeserver.py
@@ -13,59 +13,53 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import synapse
-
 import gc
 import logging
 import os
 import sys

+import synapse
+import synapse.config.logger
+from synapse import events
+from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
+    LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
+    STATIC_PREFIX, WEB_CLIENT_PREFIX
+from synapse.app import _base
+from synapse.app._base import quit_with_error, listen_ssl, listen_tcp
 from synapse.config._base import ConfigError
-
-from synapse.python_dependencies import (
-    check_requirements, DEPENDENCY_LINKS
-)
-
-from synapse.rest import ClientRestResource
-from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
-from synapse.storage import are_all_users_on_domain
-from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
-
-from synapse.server import HomeServer
-
-from twisted.internet import reactor, task, defer
-from twisted.application import service
-from twisted.web.resource import Resource, EncodingResourceWrapper
-from twisted.web.static import File
-from twisted.web.server import GzipEncoderFactory
-from synapse.http.server import RootRedirect
-from synapse.rest.media.v0.content_repository import ContentRepoResource
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
-from synapse.rest.key.v2 import KeyApiV2Resource
-from synapse.api.urls import (
-    FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
-    SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
-    SERVER_KEY_V2_PREFIX,
-)
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
-from synapse.util.logcontext import LoggingContext
-from synapse.metrics import register_memory_metrics, get_metrics_for
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
 from synapse.federation.transport.server import TransportLayerServer
+from synapse.module_api import ModuleApi
+from synapse.http.additional_resource import AdditionalResource
+from synapse.http.server import RootRedirect
+from synapse.http.site import SynapseSite
+from synapse.metrics import register_memory_metrics
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
+    check_requirements
+from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX
+from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+from synapse.rest import ClientRestResource
+from synapse.rest.key.v1.server_key_resource import LocalKey
+from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.server import HomeServer
+from synapse.storage import are_all_users_on_domain
+from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
+from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
+from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.module_loader import load_module
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.http.site import SynapseSite
-from synapse import events
-
-from daemonize import Daemonize
+from twisted.application import service
+from twisted.internet import defer, reactor
+from twisted.web.resource import EncodingResourceWrapper, NoResource
+from twisted.web.server import GzipEncoderFactory
+from twisted.web.static import File

 logger = logging.getLogger("synapse.app.homeserver")

@@ -90,7 +84,7 @@ def build_resource_for_web_client(hs):
         "\n"
         "You can also disable hosting of the webclient via the\n"
         "configuration option `web_client`\n"
-        % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
+        % {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
     )
     syweb_path = os.path.dirname(syweb.__file__)
     webclient_path = os.path.join(syweb_path, "webclient")
@@ -107,7 +101,7 @@ def build_resource_for_web_client(hs):
 class SynapseHomeServer(HomeServer):
     def _listener_http(self, config, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         tls = listener_config.get("tls", False)
         site_tag = listener_config.get("tag", port)

@@ -117,9 +111,69 @@ class SynapseHomeServer(HomeServer):
         resources = {}
         for res in listener_config["resources"]:
             for name in res["names"]:
+                resources.update(self._configure_named_resource(
+                    name, res.get("compress", False),
+                ))
+
+        additional_resources = listener_config.get("additional_resources", {})
+        logger.debug("Configuring additional resources: %r",
+                     additional_resources)
+        module_api = ModuleApi(self, self.get_auth_handler())
+        for path, resmodule in additional_resources.items():
+            handler_cls, config = load_module(resmodule)
+            handler = handler_cls(config, module_api)
+            resources[path] = AdditionalResource(self, handler.handle_request)
+
+        if WEB_CLIENT_PREFIX in resources:
+            root_resource = RootRedirect(WEB_CLIENT_PREFIX)
+        else:
+            root_resource = NoResource()
+
+        root_resource = create_resource_tree(resources, root_resource)
+
+        if tls:
+            listen_ssl(
+                bind_addresses,
+                port,
+                SynapseSite(
+                    "synapse.access.https.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                    self.version_string,
+                ),
+                self.tls_server_context_factory,
+            )
+
+        else:
+            listen_tcp(
+                bind_addresses,
+                port,
+                SynapseSite(
+                    "synapse.access.http.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                    self.version_string,
+                )
+            )
+        logger.info("Synapse now listening on port %d", port)
+
+    def _configure_named_resource(self, name, compress=False):
+        """Build a resource map for a named resource
+
+        Args:
+            name (str): named resource: one of "client", "federation", etc
+            compress (bool): whether to enable gzip compression for this
+                resource
+
+        Returns:
+            dict[str, Resource]: map from path to HTTP resource
+        """
+        resources = {}
         if name == "client":
             client_resource = ClientRestResource(self)
-            if res["compress"]:
+            if compress:
                 client_resource = gz_wrap(client_resource)

             resources.update({
@@ -143,7 +197,8 @@ class SynapseHomeServer(HomeServer):
             })

         if name in ["media", "federation", "client"]:
-            media_repo = MediaRepositoryResource(self)
+            if self.get_config().enable_media_repo:
+                media_repo = self.get_media_repository_resource()
             resources.update({
                 MEDIA_PREFIX: media_repo,
                 LEGACY_MEDIA_PREFIX: media_repo,
@@ -151,6 +206,10 @@ class SynapseHomeServer(HomeServer):
                     self, self.config.uploads_path
                 ),
             })
+            elif name == "media":
+                raise ConfigError(
+                    "'media' resource conflicts with enable_media_repo=False",
+                )

         if name in ["keys", "federation"]:
             resources.update({
@@ -165,38 +224,9 @@ class SynapseHomeServer(HomeServer):
             resources[METRICS_PREFIX] = MetricsResource(self)

         if name == "replication":
-            resources[REPLICATION_PREFIX] = ReplicationResource(self)
+            resources[REPLICATION_PREFIX] = ReplicationRestResource(self)

-        if WEB_CLIENT_PREFIX in resources:
-            root_resource = RootRedirect(WEB_CLIENT_PREFIX)
-        else:
-            root_resource = Resource()
-
-        root_resource = create_resource_tree(resources, root_resource)
-        if tls:
-            reactor.listenSSL(
-                port,
-                SynapseSite(
-                    "synapse.access.https.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                self.tls_server_context_factory,
-                interface=bind_address
-            )
-        else:
-            reactor.listenTCP(
-                port,
-                SynapseSite(
-                    "synapse.access.http.%s" % (site_tag,),
-                    site_tag,
-                    listener_config,
-                    root_resource,
-                ),
-                interface=bind_address
-            )
-        logger.info("Synapse now listening on port %d", port)
-
+        return resources

     def start_listening(self):
         config = self.get_config()
@@ -205,14 +235,24 @@ class SynapseHomeServer(HomeServer):
             if listener["type"] == "http":
                 self._listener_http(config, listener)
             elif listener["type"] == "manhole":
-                reactor.listenTCP(
-                    listener["port"],
-                    manhole(
-                        username="matrix",
-                        password="rabbithole",
-                        globals={"hs": self},
-                    ),
-                    interface=listener.get("bind_address", '127.0.0.1')
-                )
+                listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            elif listener["type"] == "replication":
+                bind_addresses = listener["bind_addresses"]
+                for address in bind_addresses:
+                    factory = ReplicationStreamProtocolFactory(self)
+                    server_listener = reactor.listenTCP(
+                        listener["port"], factory, interface=address
+                    )
+                    reactor.addSystemEventTrigger(
+                        "before", "shutdown", server_listener.stopListening,
+                    )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])
@@ -233,29 +273,6 @@ class SynapseHomeServer(HomeServer):
         except IncorrectDatabaseSetup as e:
             quit_with_error(e.message)

-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
-
-def quit_with_error(error_string):
-    message_lines = error_string.split("\n")
-    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
-    sys.stderr.write("*" * line_length + '\n')
-    for line in message_lines:
-        sys.stderr.write(" %s\n" % (line.rstrip(),))
-    sys.stderr.write("*" * line_length + '\n')
-    sys.exit(1)
-

 def setup(config_options):
     """
@@ -280,7 +297,7 @@ def setup(config_options):
         # generating config files and shouldn't try to continue.
         sys.exit(0)

-    config.setup_logging()
+    synapse.config.logger.setup_logging(config, use_worker_options=False)

     # check any extra requirements we have now we have a config
     check_requirements(config)
@@ -334,7 +351,7 @@ def setup(config_options):
         hs.get_state_handler().start_caching()
         hs.get_datastore().start_profiling()
         hs.get_datastore().start_doing_background_updates()
-        hs.get_replication_layer().start_get_pdu_cache()
+        hs.get_federation_client().start_get_pdu_cache()

         register_memory_metrics(hs)

@@ -383,10 +400,15 @@ def run(hs):
         ThreadPool._worker = profile(ThreadPool._worker)
         reactor.run = profile(reactor.run)

-    start_time = hs.get_clock().time()
+    clock = hs.get_clock()
+    start_time = clock.time()

     stats = {}

+    # Contains the list of processes we will be monitoring
+    # currently either 0 or 1
+    stats_process = []
+
     @defer.inlineCallbacks
     def phone_stats_home():
         logger.info("Gathering stats for reporting")
@@ -395,41 +417,36 @@ def run(hs):
         if uptime < 0:
             uptime = 0

-        # If the stats directory is empty then this is the first time we've
-        # reported stats.
-        first_time = not stats
-
         stats["homeserver"] = hs.config.server_name
         stats["timestamp"] = now
         stats["uptime_seconds"] = uptime
         stats["total_users"] = yield hs.get_datastore().count_all_users()

+        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
+        stats["total_nonbridged_users"] = total_nonbridged_users
+
         room_count = yield hs.get_datastore().get_room_count()
         stats["total_room_count"] = room_count

         stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
-        daily_messages = yield hs.get_datastore().count_daily_messages()
-        if daily_messages is not None:
-            stats["daily_messages"] = daily_messages
-        else:
-            stats.pop("daily_messages", None)
-
-        if first_time:
-            # Add callbacks to report the synapse stats as metrics whenever
-            # prometheus requests them, typically every 30s.
-            # As some of the stats are expensive to calculate we only update
-            # them when synapse phones home to matrix.org every 24 hours.
-            metrics = get_metrics_for("synapse.usage")
-            metrics.add_callback("timestamp", lambda: stats["timestamp"])
-            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
-            metrics.add_callback("total_users", lambda: stats["total_users"])
-            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
-            metrics.add_callback(
-                "daily_active_users", lambda: stats["daily_active_users"]
-            )
-            metrics.add_callback(
-                "daily_messages", lambda: stats.get("daily_messages", 0)
-            )
+        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
+        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
+
+        r30_results = yield hs.get_datastore().count_r30_users()
+        for name, count in r30_results.iteritems():
+            stats["r30_users_" + name] = count
+
+        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+        stats["daily_sent_messages"] = daily_sent_messages
+        stats["cache_factor"] = CACHE_SIZE_FACTOR
+        stats["event_cache_size"] = hs.config.event_cache_size
+
+        if len(stats_process) > 0:
+            stats["memory_rss"] = 0
+            stats["cpu_average"] = 0
+            for process in stats_process:
+                stats["memory_rss"] += process.memory_info().rss
+                stats["cpu_average"] += int(process.cpu_percent(interval=None))

         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:
@@ -440,37 +457,48 @@ def run(hs):
         except Exception as e:
             logger.warn("Error reporting stats: %s", e)

-    if hs.config.report_stats:
-        phone_home_task = task.LoopingCall(phone_stats_home)
-        logger.info("Scheduling stats reporting for 24 hour intervals")
-        phone_home_task.start(60 * 60 * 24, now=False)
-
-    def in_thread():
-        # Uncomment to enable tracing of log context changes.
-        # sys.settrace(logcontext_tracer)
-        with LoggingContext("run"):
-            change_resource_limit(hs.config.soft_file_limit)
-            if hs.config.gc_thresholds:
-                gc.set_threshold(*hs.config.gc_thresholds)
-            reactor.run()
-
-    if hs.config.daemonize:
-        if hs.config.print_pidfile:
-            print (hs.config.pid_file)
-
-        daemon = Daemonize(
-            app="synapse-homeserver",
-            pid=hs.config.pid_file,
-            action=lambda: in_thread(),
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-
-        daemon.start()
-    else:
-        in_thread()
+    def performance_stats_init():
+        try:
+            import psutil
+            process = psutil.Process()
+            # Ensure we can fetch both, and make the initial request for cpu_percent
+            # so the next request will use this as the initial point.
+            process.memory_info().rss
+            process.cpu_percent(interval=None)
+            logger.info("report_stats can use psutil")
+            stats_process.append(process)
+        except (ImportError, AttributeError):
+            logger.warn(
+                "report_stats enabled but psutil is not installed or incorrect version."
+                " Disabling reporting of memory/cpu stats."
+                " Ensuring psutil is available will help matrix.org track performance"
+                " changes across releases."
+            )
+
+    if hs.config.report_stats:
+        logger.info("Scheduling stats reporting for 3 hour intervals")
+        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
+
+        # We need to defer this init for the cases that we daemonize
+        # otherwise the process ID we get is that of the non-daemon process
+        clock.call_later(0, performance_stats_init)
+
+        # We wait 5 minutes to send the first set of stats as the server can
+        # be quite busy the first few minutes
+        clock.call_later(5 * 60, phone_stats_home)
+
+    if hs.config.daemonize and hs.config.print_pidfile:
+        print (hs.config.pid_file)
+
+    _base.start_reactor(
+        "synapse-homeserver",
+        hs.config.soft_file_limit,
+        hs.config.gc_thresholds,
+        hs.config.pid_file,
+        hs.config.daemonize,
+        hs.config.cpu_affinity,
+        logger,
+    )


 def main():
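Note: the stats rework above replaces the Daemonize/LoopingCall plumbing with `clock.looping_call` on a three-hour period plus a deferred `performance_stats_init`, which probes psutil once up front so a missing or too-old psutil degrades to a warning instead of failing at report time. The probe uses only stock psutil calls; a standalone sketch of the same idea:

    def probe_psutil():
        # Same probe as performance_stats_init: fetch RSS once and prime
        # cpu_percent(interval=None) so the next call measures a real delta
        # instead of returning a meaningless 0.0.
        try:
            import psutil
            process = psutil.Process()
            process.memory_info().rss
            process.cpu_percent(interval=None)
            return process
        except (ImportError, AttributeError):
            return None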
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -13,43 +13,37 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
 
 import synapse
-from synapse.config._base import ConfigError
-from synapse.config.homeserver import HomeServerConfig
-from synapse.config.logger import setup_logging
-from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.rest.media.v0.content_repository import ContentRepoResource
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
-from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
-from synapse.storage.engines import create_engine
-from synapse.storage.media_repository import MediaRepositoryStore
-from synapse.util.async import sleep
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext
-from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
-from synapse.util.versionstring import get_version_string
+from synapse import events
 from synapse.api.urls import (
     CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
 )
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.crypto import context_factory
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.storage.media_repository import MediaRepositoryStore
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
 
 logger = logging.getLogger("synapse.app.media_repository")
@@ -57,27 +51,15 @@ logger = logging.getLogger("synapse.app.media_repository")
 class MediaRepositorySlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
+    SlavedClientIpStore,
+    TransactionStore,
     BaseSlavedStore,
     MediaRepositoryStore,
-    ClientIpStore,
 ):
     pass
 
 
 class MediaRepositoryServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
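Two things happen in this hunk: the copy-pasted `get_db_conn` is deleted from the worker (presumably centralised elsewhere in this compare), and the store gains `SlavedClientIpStore`/`TransactionStore` mixins. The order of those mixins matters because Python resolves attributes along the MRO, left to right, so the more specific slaved stores must come before `BaseSlavedStore`. A self-contained toy with illustrative class names:

```python
# Why mixin order matters: each slaved store contributes its own stream
# position, and lookup proceeds left to right along the MRO.
class BaseSlavedStore(object):
    def stream_positions(self):
        return {}


class SlavedFooStore(BaseSlavedStore):
    def stream_positions(self):
        pos = super(SlavedFooStore, self).stream_positions()
        pos["foo"] = 42  # this mixin's own stream token
        return pos


class WorkerStore(SlavedFooStore, BaseSlavedStore):
    pass


print(WorkerStore().stream_positions())  # {'foo': 42}
```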
@@ -85,7 +67,7 @@ class MediaRepositoryServer(HomeServer):
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -93,7 +75,7 @@ class MediaRepositoryServer(HomeServer):
             if name == "metrics":
                 resources[METRICS_PREFIX] = MetricsResource(self)
             elif name == "media":
-                media_repo = MediaRepositoryResource(self)
+                media_repo = self.get_media_repository_resource()
                 resources.update({
                     MEDIA_PREFIX: media_repo,
                     LEGACY_MEDIA_PREFIX: media_repo,
@@ -102,17 +84,20 @@ class MediaRepositoryServer(HomeServer):
                     ),
                 })
 
-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-            ),
-            interface=bind_address
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
         )
 
         logger.info("Synapse media repository now listening on port %d", port)
 
     def start_listening(self, listeners):
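`bind_address` (a single string) becomes `bind_addresses` (a list), so one listener block can bind, say, both `0.0.0.0` and `::`. A plausible sketch of the `_base.listen_tcp` helper used above; the real implementation may add error handling around each bind:

```python
# Sketch of a multi-address TCP listen helper for Twisted. Assumes only
# reactor.listenTCP, whose signature is (port, factory, backlog, interface).
from twisted.internet import reactor


def listen_tcp(bind_addresses, port, factory, backlog=50):
    # Bind the same site/factory on every configured address instead of
    # passing a single interface= argument.
    for address in bind_addresses:
        reactor.listenTCP(port, factory, backlog, address)
```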
@@ -120,33 +105,22 @@ class MediaRepositoryServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
-                ),
-                interface=listener.get("bind_address", '127.0.0.1')
+                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])
 
-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())
 
 
 def start(config_options):
@@ -160,7 +134,16 @@ def start(config_options):
 
     assert config.worker_app == "synapse.app.media_repository"
 
-    setup_logging(config.worker_log_config, config.worker_log_file)
+    if config.enable_media_repo:
+        _base.quit_with_error(
+            "enable_media_repo must be disabled in the main synapse process\n"
+            "before the media repo can be run in a separate worker.\n"
+            "Please add ``enable_media_repo: false`` to the main config\n"
+        )
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
     database_engine = create_engine(config.database_config)
 
@@ -176,36 +159,15 @@ def start(config_options):
     )
 
     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)
 
-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
-        ss.replicate()
 
     reactor.callWhenRunning(start)
 
-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-media-repository",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-media-repository", config)
 
 
 if __name__ == '__main__':
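With the HTTP long-polling `replicate()` loop gone, the media repository only has to wire up the TCP replication client. The full pattern, using only calls that appear in this diff (`MyWorkerServer` is an illustrative name, not a class in synapse):

```python
# Minimal worker replication wiring under the new TCP scheme; sketch only.
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer


class MyWorkerServer(HomeServer):
    def build_tcp_replication(self):
        # The plain handler just feeds updates into the slaved stores;
        # workers that need to react per-stream subclass it instead (see
        # PusherReplicationHandler and SyncReplicationHandler below).
        return ReplicationClientHandler(self.get_datastore())

    def start_listening(self, listeners):
        # ... set up HTTP/manhole listeners as in the diff ...
        self.get_tcp_replication().start_replication(self)
```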
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -13,37 +13,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
 
 import synapse
-from synapse.server import HomeServer
+from synapse import events
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.storage.roommember import RoomMemberStore
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.pushers import SlavedPusherStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
-from synapse.storage.engines import create_engine
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.util.async import sleep
+from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
 
 logger = logging.getLogger("synapse.app.pusher")
@@ -80,46 +74,19 @@ class PusherSlaveStore(
         DataStore.get_profile_displayname.__func__
     )
 
-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
-    )
-
 
 class PusherServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = PusherSlaveStore(self.get_db_conn(), self)
         logger.info("Finished setting up.")
 
     def remove_pusher(self, app_id, push_key, user_id):
-        http_client = self.get_simple_http_client()
-        replication_url = self.config.worker_replication_url
-        url = replication_url + "/remove_pushers"
-        return http_client.post_json_get_json(url, {
-            "remove": [{
-                "app_id": app_id,
-                "push_key": push_key,
-                "user_id": user_id,
-            }]
-        })
+        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -127,17 +94,20 @@ class PusherServer(HomeServer):
             if name == "metrics":
                 resources[METRICS_PREFIX] = MetricsResource(self)
 
-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-            ),
-            interface=bind_address
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
         )
 
         logger.info("Synapse pusher now listening on port %d", port)
 
     def start_listening(self, listeners):
@@ -145,85 +115,67 @@ class PusherServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
-                ),
-                interface=listener.get("bind_address", '127.0.0.1')
+                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])
 
-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        pusher_pool = self.get_pusherpool()
-
-        def stop_pusher(user_id, app_id, pushkey):
-            key = "%s:%s" % (app_id, pushkey)
-            pushers_for_user = pusher_pool.pushers.get(user_id, {})
-            pusher = pushers_for_user.pop(key, None)
-            if pusher is None:
-                return
-            logger.info("Stopping pusher %r / %r", user_id, key)
-            pusher.on_stop()
-
-        def start_pusher(user_id, app_id, pushkey):
-            key = "%s:%s" % (app_id, pushkey)
-            logger.info("Starting pusher %r / %r", user_id, key)
-            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)
-
-        @defer.inlineCallbacks
-        def poke_pushers(results):
-            pushers_rows = set(
-                map(tuple, results.get("pushers", {}).get("rows", []))
-            )
-            deleted_pushers_rows = set(
-                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
-            )
-            for row in sorted(pushers_rows | deleted_pushers_rows):
-                if row in deleted_pushers_rows:
-                    user_id, app_id, pushkey = row[1:4]
-                    stop_pusher(user_id, app_id, pushkey)
-                elif row in pushers_rows:
-                    user_id = row[1]
-                    app_id = row[5]
-                    pushkey = row[8]
-                    yield start_pusher(user_id, app_id, pushkey)
-
-            stream = results.get("events")
-            if stream and stream["rows"]:
-                min_stream_id = stream["rows"][0][0]
-                max_stream_id = stream["position"]
-                preserve_fn(pusher_pool.on_new_notifications)(
-                    min_stream_id, max_stream_id
-                )
-
-            stream = results.get("receipts")
-            if stream and stream["rows"]:
-                rows = stream["rows"]
-                affected_room_ids = set(row[1] for row in rows)
-                min_stream_id = rows[0][0]
-                max_stream_id = stream["position"]
-                preserve_fn(pusher_pool.on_new_receipts)(
-                    min_stream_id, max_stream_id, affected_room_ids
-                )
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                poke_pushers(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(30)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return PusherReplicationHandler(self)
+
+
+class PusherReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(PusherReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.pusher_pool = hs.get_pusherpool()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
+        run_in_background(self.poke_pushers, stream_name, token, rows)
+
+    @defer.inlineCallbacks
+    def poke_pushers(self, stream_name, token, rows):
+        try:
+            if stream_name == "pushers":
+                for row in rows:
+                    if row.deleted:
+                        yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
+                    else:
+                        yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
+            elif stream_name == "events":
+                yield self.pusher_pool.on_new_notifications(
+                    token, token,
+                )
+            elif stream_name == "receipts":
+                yield self.pusher_pool.on_new_receipts(
+                    token, token, set(row.room_id for row in rows)
+                )
+        except Exception:
+            logger.exception("Error poking pushers")
+
+    def stop_pusher(self, user_id, app_id, pushkey):
+        key = "%s:%s" % (app_id, pushkey)
+        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
+        pusher = pushers_for_user.pop(key, None)
+        if pusher is None:
+            return
+        logger.info("Stopping pusher %r / %r", user_id, key)
+        pusher.on_stop()
+
+    def start_pusher(self, user_id, app_id, pushkey):
+        key = "%s:%s" % (app_id, pushkey)
+        logger.info("Starting pusher %r / %r", user_id, key)
+        return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)
 
 
 def start(config_options):
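`on_rdata` fires on the replication connection itself, so `PusherReplicationHandler` hands the real work to `run_in_background` rather than stalling the stream. The dispatch shape in isolation, with an illustrative `PusherRow` namedtuple standing in for the stream's parsed rows and `pusher_pool` for any object with matching methods:

```python
# Toy version of the poke_pushers dispatch; PusherRow and pusher_pool are
# stand-ins, but the stream names match the diff above.
import collections

PusherRow = collections.namedtuple(
    "PusherRow", ["deleted", "user_id", "app_id", "pushkey"]
)


def poke_pushers(pusher_pool, stream_name, token, rows):
    if stream_name == "pushers":
        for row in rows:
            if row.deleted:
                pusher_pool.stop(row.user_id, row.app_id, row.pushkey)
            else:
                pusher_pool.start(row.user_id, row.app_id, row.pushkey)
    elif stream_name == "events":
        # The stream token doubles as both min and max notification position.
        pusher_pool.on_new_notifications(token, token)
```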
@@ -237,7 +189,9 @@ def start(config_options):
 
     assert config.worker_app == "synapse.app.pusher"
 
-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
     if config.start_pushers:
         sys.stderr.write(
@@ -264,34 +218,14 @@ def start(config_options):
     ps.setup()
     ps.start_listening(config.worker_listeners)
 
-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
-        ps.replicate()
         ps.get_pusherpool().start()
         ps.get_datastore().start_profiling()
         ps.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
 
-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-pusher",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-pusher", config)
 
 
 if __name__ == '__main__':
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -13,99 +13,87 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import contextlib
+import logging
+import sys
 
 import synapse
-from synapse.api.constants import EventTypes, PresenceState
+from synapse.api.constants import EventTypes
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.events import FrozenEvent
-from synapse.handlers.presence import PresenceHandler
-from synapse.http.site import SynapseSite
+from synapse.handlers.presence import PresenceHandler, get_interested_parties
 from synapse.http.server import JsonResource
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.rest.client.v2_alpha import sync
-from synapse.rest.client.v1 import events
-from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
-from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.filtering import SlavedFilteringStore
+from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1 import events
+from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
+from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
+from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
-from synapse.storage.presence import PresenceStore, UserPresenceState
+from synapse.storage.presence import UserPresenceState
 from synapse.storage.roommember import RoomMemberStore
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
 
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import contextlib
-import gc
-import ujson as json
+from six import iteritems
 
 logger = logging.getLogger("synapse.app.synchrotron")
 
 
 class SynchrotronSlavedStore(
-    SlavedPushRuleStore,
-    SlavedEventStore,
     SlavedReceiptsStore,
     SlavedAccountDataStore,
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedFilteringStore,
     SlavedPresenceStore,
+    SlavedGroupServerStore,
     SlavedDeviceInboxStore,
+    SlavedDeviceStore,
+    SlavedPushRuleStore,
+    SlavedEventStore,
+    SlavedClientIpStore,
     RoomStore,
     BaseSlavedStore,
-    ClientIpStore,  # After BaseSlavedStore because the constructor is different
 ):
-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
-    )
-
-    # XXX: This is a bit broken because we don't persist the accepted list in a
-    # way that can be replicated. This means that we don't have a way to
-    # invalidate the cache correctly.
-    get_presence_list_accepted = PresenceStore.__dict__[
-        "get_presence_list_accepted"
-    ]
-    get_presence_list_observers_accepted = PresenceStore.__dict__[
-        "get_presence_list_observers_accepted"
-    ]
+    did_forget = (
+        RoomMemberStore.__dict__["did_forget"]
+    )
 
 
 UPDATE_SYNCING_USERS_MS = 10 * 1000
 
 
 class SynchrotronPresence(object):
     def __init__(self, hs):
+        self.hs = hs
         self.is_mine_id = hs.is_mine_id
         self.http_client = hs.get_simple_http_client()
        self.store = hs.get_datastore()
         self.user_to_num_current_syncs = {}
-        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()
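Both the removed assignments (`who_forgot_in_room`, the `get_presence_list_*` pair) and the new `did_forget` use the same trick: `Klass.__dict__["name"]` fetches the raw class attribute, typically a caching descriptor, without Python's normal method binding, so it can be grafted onto an unrelated slaved-store class. In miniature:

```python
# Why __dict__ access: Klass.__dict__["name"] returns the attribute as
# stored in the class body (e.g. a descriptor), so it can be re-attached
# to another class and will bind against *that* class's instances.
class Master(object):
    def did_forget(self, user_id, room_id):
        return False


class Slaved(object):
    did_forget = Master.__dict__["did_forget"]


print(Slaved().did_forget("@u:hs", "!r:hs"))  # False
```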
@@ -115,17 +103,52 @@ class SynchrotronPresence(object):
             for state in active_presence
         }
 
+        # user_id -> last_sync_ms. Lists the users that have stopped syncing
+        # but we haven't notified the master of that yet
+        self.users_going_offline = {}
+
+        self._send_stop_syncing_loop = self.clock.looping_call(
+            self.send_stop_syncing, 10 * 1000
+        )
+
         self.process_id = random_string(16)
         logger.info("Presence process_id is %r", self.process_id)
 
-        self._sending_sync = False
-        self._need_to_send_sync = False
-        self.clock.looping_call(
-            self._send_syncing_users_regularly,
-            UPDATE_SYNCING_USERS_MS,
-        )
-
-        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)
+
+    def mark_as_coming_online(self, user_id):
+        """A user has started syncing. Send a UserSync to the master, unless they
+        had recently stopped syncing.
+
+        Args:
+            user_id (str)
+        """
+        going_offline = self.users_going_offline.pop(user_id, None)
+        if not going_offline:
+            # Safe to skip because we haven't yet told the master they were offline
+            self.send_user_sync(user_id, True, self.clock.time_msec())
+
+    def mark_as_going_offline(self, user_id):
+        """A user has stopped syncing. We wait before notifying the master as
+        its likely they'll come back soon. This allows us to avoid sending
+        a stopped syncing immediately followed by a started syncing notification
+        to the master
+
+        Args:
+            user_id (str)
+        """
+        self.users_going_offline[user_id] = self.clock.time_msec()
+
+    def send_stop_syncing(self):
+        """Check if there are any users who have stopped syncing a while ago
+        and haven't come back yet. If there are poke the master about them.
+        """
+        now = self.clock.time_msec()
+        for user_id, last_sync_ms in self.users_going_offline.items():
+            if now - last_sync_ms > 10 * 1000:
+                self.users_going_offline.pop(user_id, None)
+                self.send_user_sync(user_id, False, last_sync_ms)
 
     def set_state(self, user, state, ignore_status_msg=False):
         # TODO Hows this supposed to work?
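The rewritten presence tracking debounces `USER_SYNC` traffic to the master: "going offline" is held back for ten seconds, so a client that reconnects straight away generates no traffic at all, instead of an offline message immediately followed by an online one. The mechanism in isolation (`clock` is anything with a `time_msec()` method; all names here are illustrative):

```python
# The debounce in miniature: offline is only reported if the user has not
# come back within the grace period.
GRACE_MS = 10 * 1000


class Debouncer(object):
    def __init__(self, clock, send_user_sync):
        self.clock = clock
        self.send_user_sync = send_user_sync
        self.users_going_offline = {}

    def mark_as_coming_online(self, user_id):
        if self.users_going_offline.pop(user_id, None) is None:
            # The master still thinks they are offline, so tell it.
            self.send_user_sync(user_id, True, self.clock.time_msec())

    def mark_as_going_offline(self, user_id):
        # Record the time; nothing is sent yet.
        self.users_going_offline[user_id] = self.clock.time_msec()

    def send_stop_syncing(self):
        # Called periodically (every 10s in the diff above).
        now = self.clock.time_msec()
        for user_id, last in list(self.users_going_offline.items()):
            if now - last > GRACE_MS:
                del self.users_going_offline[user_id]
                self.send_user_sync(user_id, False, last)
```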
@@ -133,18 +156,16 @@ class SynchrotronPresence(object):
 
     get_states = PresenceHandler.get_states.__func__
     get_state = PresenceHandler.get_state.__func__
-    _get_interested_parties = PresenceHandler._get_interested_parties.__func__
     current_state_for_users = PresenceHandler.current_state_for_users.__func__
 
-    @defer.inlineCallbacks
     def user_syncing(self, user_id, affect_presence):
         if affect_presence:
             curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
             self.user_to_num_current_syncs[user_id] = curr_sync + 1
-            prev_states = yield self.current_state_for_users([user_id])
-            if prev_states[user_id].state == PresenceState.OFFLINE:
-                # TODO: Don't block the sync request on this HTTP hit.
-                yield self._send_syncing_users_now()
+
+            # If we went from no in flight sync to some, notify replication
+            if self.user_to_num_current_syncs[user_id] == 1:
+                self.mark_as_coming_online(user_id)
 
         def _end():
             # We check that the user_id is in user_to_num_current_syncs because
@@ -153,6 +174,10 @@ class SynchrotronPresence(object):
             if affect_presence and user_id in self.user_to_num_current_syncs:
                 self.user_to_num_current_syncs[user_id] -= 1
 
+                # If we went from one in flight sync to non, notify replication
+                if self.user_to_num_current_syncs[user_id] == 0:
+                    self.mark_as_going_offline(user_id)
+
         @contextlib.contextmanager
         def _user_syncing():
             try:
@@ -160,56 +185,12 @@ class SynchrotronPresence(object):
             finally:
                 _end()
 
-        defer.returnValue(_user_syncing())
-
-    @defer.inlineCallbacks
-    def _on_shutdown(self):
-        # When the synchrotron is shutdown tell the master to clear the in
-        # progress syncs for this process
-        self.user_to_num_current_syncs.clear()
-        yield self._send_syncing_users_now()
-
-    def _send_syncing_users_regularly(self):
-        # Only send an update if we aren't in the middle of sending one.
-        if not self._sending_sync:
-            preserve_fn(self._send_syncing_users_now)()
-
-    @defer.inlineCallbacks
-    def _send_syncing_users_now(self):
-        if self._sending_sync:
-            # We don't want to race with sending another update.
-            # Instead we wait for that update to finish and send another
-            # update afterwards.
-            self._need_to_send_sync = True
-            return
-
-        # Flag that we are sending an update.
-        self._sending_sync = True
-
-        yield self.http_client.post_json_get_json(self.syncing_users_url, {
-            "process_id": self.process_id,
-            "syncing_users": [
-                user_id for user_id, count in self.user_to_num_current_syncs.items()
-                if count > 0
-            ],
-        })
-
-        # Unset the flag as we are no longer sending an update.
-        self._sending_sync = False
-        if self._need_to_send_sync:
-            # If something happened while we were sending the update then
-            # we might need to send another update.
-            # TODO: Check if the update that was sent matches the current state
-            # as we only need to send an update if they are different.
-            self._need_to_send_sync = False
-            yield self._send_syncing_users_now()
+        return defer.succeed(_user_syncing())
 
     @defer.inlineCallbacks
     def notify_from_replication(self, states, stream_id):
-        parties = yield self._get_interested_parties(
-            states, calculate_remote_hosts=False
-        )
-        room_ids_to_states, users_to_states, _ = parties
+        parties = yield get_interested_parties(self.store, states)
+        room_ids_to_states, users_to_states = parties
 
         self.notifier.on_new_event(
             "presence_key", stream_id, rooms=room_ids_to_states.keys(),
@@ -217,27 +198,25 @@ class SynchrotronPresence(object):
         )
 
     @defer.inlineCallbacks
-    def process_replication(self, result):
-        stream = result.get("presence", {"rows": []})
-        states = []
-        for row in stream["rows"]:
-            (
-                position, user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            ) = row
-            state = UserPresenceState(
-                user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            )
-            self.user_to_current_state[user_id] = state
-            states.append(state)
+    def process_replication_rows(self, token, rows):
+        states = [UserPresenceState(
+            row.user_id, row.state, row.last_active_ts,
+            row.last_federation_update_ts, row.last_user_sync_ts, row.status_msg,
+            row.currently_active
+        ) for row in rows]
 
-        if states and "position" in stream:
-            stream_id = int(stream["position"])
-            yield self.notify_from_replication(states, stream_id)
+        for state in states:
+            self.user_to_current_state[row.user_id] = state
+
+        stream_id = token
+        yield self.notify_from_replication(states, stream_id)
+
+    def get_currently_syncing_users(self):
+        return [
+            user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
+            if count > 0
+        ]
 
 
 class SynchrotronTyping(object):
     def __init__(self, hs):
@@ -251,16 +230,12 @@ class SynchrotronTyping(object):
         # value which we *must* use for the next replication request.
         return {"typing": self._latest_room_serial}
 
-    def process_replication(self, result):
-        stream = result.get("typing")
-        if stream:
-            self._latest_room_serial = int(stream["position"])
-
-            for row in stream["rows"]:
-                position, room_id, typing_json = row
-                typing = json.loads(typing_json)
-                self._room_serials[room_id] = position
-                self._room_typing[room_id] = typing
+    def process_replication_rows(self, token, rows):
+        self._latest_room_serial = token
+
+        for row in rows:
+            self._room_serials[row.room_id] = token
+            self._room_typing[row.room_id] = row.user_ids
 
 
 class SynchrotronApplicationService(object):
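Note that `process_replication_rows` now receives parsed row objects with named fields rather than positional lists decoded from JSON. One subtlety in the new presence code above: the loop body `self.user_to_current_state[row.user_id] = state` refers to `row` after the list comprehension has finished, which only resolves because Python 2 leaks comprehension variables, and it means every state is keyed by the final row's `user_id`; keying by `state.user_id` is presumably what was intended. A self-contained demonstration of the leak:

```python
# On Python 2 the comprehension variable escapes its scope; on Python 3
# this raises NameError, which is why the pattern above is fragile.
states = [n * 10 for n in (1, 2, 3)]
print(n)  # prints 3 on Python 2; NameError on Python 3
```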
@@ -269,19 +244,6 @@ class SynchrotronApplicationService(object):
 
 
 class SynchrotronServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
@@ -289,7 +251,7 @@ class SynchrotronServer(HomeServer):
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -309,17 +271,20 @@ class SynchrotronServer(HomeServer):
                     "/_matrix/client/api/v1": resource,
                 })
 
-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
-            port,
-            SynapseSite(
-                "synapse.access.http.%s" % (site_tag,),
-                site_tag,
-                listener_config,
-                root_resource,
-            ),
-            interface=bind_address
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
         )
 
         logger.info("Synapse synchrotron now listening on port %d", port)
 
     def start_listening(self, listeners):
@@ -327,104 +292,22 @@ class SynchrotronServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
-                ),
-                interface=listener.get("bind_address", '127.0.0.1')
+                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])
 
-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        notifier = self.get_notifier()
-        presence_handler = self.get_presence_handler()
-        typing_handler = self.get_typing_handler()
-
-        def notify_from_stream(
-            result, stream_name, stream_key, room=None, user=None
-        ):
-            stream = result.get(stream_name)
-            if stream:
-                position_index = stream["field_names"].index("position")
-                if room:
-                    room_index = stream["field_names"].index(room)
-                if user:
-                    user_index = stream["field_names"].index(user)
-
-                users = ()
-                rooms = ()
-                for row in stream["rows"]:
-                    position = row[position_index]
-
-                    if user:
-                        users = (row[user_index],)
-
-                    if room:
-                        rooms = (row[room_index],)
-
-                    notifier.on_new_event(
-                        stream_key, position, users=users, rooms=rooms
-                    )
-
-        def notify(result):
-            stream = result.get("events")
-            if stream:
-                max_position = stream["position"]
-                for row in stream["rows"]:
-                    position = row[0]
-                    internal = json.loads(row[1])
-                    event_json = json.loads(row[2])
-                    event = FrozenEvent(event_json, internal_metadata_dict=internal)
-                    extra_users = ()
-                    if event.type == EventTypes.Member:
-                        extra_users = (event.state_key,)
-                    notifier.on_new_room_event(
-                        event, position, max_position, extra_users
-                    )
-
-            notify_from_stream(
-                result, "push_rules", "push_rules_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "user_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "room_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "tag_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "receipts", "receipt_key", room="room_id"
-            )
-            notify_from_stream(
-                result, "typing", "typing_key", room="room_id"
-            )
-            notify_from_stream(
-                result, "to_device", "to_device_key", user="user_id"
-            )
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args.update(typing_handler.stream_positions())
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                typing_handler.process_replication(result)
-                yield presence_handler.process_replication(result)
-                notify(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return SyncReplicationHandler(self)
 
     def build_presence_handler(self):
         return SynchrotronPresence(self)
@@ -433,6 +316,84 @@ class SynchrotronServer(HomeServer):
         return SynchrotronTyping(self)
 
 
+class SyncReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(SyncReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.store = hs.get_datastore()
+        self.typing_handler = hs.get_typing_handler()
+        # NB this is a SynchrotronPresence, not a normal PresenceHandler
+        self.presence_handler = hs.get_presence_handler()
+        self.notifier = hs.get_notifier()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
+        run_in_background(self.process_and_notify, stream_name, token, rows)
+
+    def get_streams_to_replicate(self):
+        args = super(SyncReplicationHandler, self).get_streams_to_replicate()
+        args.update(self.typing_handler.stream_positions())
+        return args
+
+    def get_currently_syncing_users(self):
+        return self.presence_handler.get_currently_syncing_users()
+
+    @defer.inlineCallbacks
+    def process_and_notify(self, stream_name, token, rows):
+        try:
+            if stream_name == "events":
+                # We shouldn't get multiple rows per token for events stream, so
+                # we don't need to optimise this for multiple rows.
+                for row in rows:
+                    event = yield self.store.get_event(row.event_id)
+                    extra_users = ()
+                    if event.type == EventTypes.Member:
+                        extra_users = (event.state_key,)
+                    max_token = self.store.get_room_max_stream_ordering()
+                    self.notifier.on_new_room_event(
+                        event, token, max_token, extra_users
+                    )
+            elif stream_name == "push_rules":
+                self.notifier.on_new_event(
+                    "push_rules_key", token, users=[row.user_id for row in rows],
+                )
+            elif stream_name in ("account_data", "tag_account_data",):
+                self.notifier.on_new_event(
+                    "account_data_key", token, users=[row.user_id for row in rows],
+                )
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "receipt_key", token, rooms=[row.room_id for row in rows],
+                )
+            elif stream_name == "typing":
+                self.typing_handler.process_replication_rows(token, rows)
+                self.notifier.on_new_event(
+                    "typing_key", token, rooms=[row.room_id for row in rows],
+                )
+            elif stream_name == "to_device":
+                entities = [row.entity for row in rows if row.entity.startswith("@")]
+                if entities:
+                    self.notifier.on_new_event(
+                        "to_device_key", token, users=entities,
+                    )
+            elif stream_name == "device_lists":
+                all_room_ids = set()
+                for row in rows:
+                    room_ids = yield self.store.get_rooms_for_user(row.user_id)
+                    all_room_ids.update(room_ids)
+                self.notifier.on_new_event(
+                    "device_list_key", token, rooms=all_room_ids,
+                )
+            elif stream_name == "presence":
+                yield self.presence_handler.process_replication_rows(token, rows)
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "groups_key", token, users=[row.user_id for row in rows],
+                )
+        except Exception:
+            logger.exception("Error processing replication")
+
+
 def start(config_options):
     try:
         config = HomeServerConfig.load_config(
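Note that `process_and_notify` above tests `stream_name == "receipts"` twice; the second arm, which emits `groups_key`, is unreachable because the first match always wins. Given that its rows carry `user_id`s, the condition was presumably meant to be `"groups"`. A minimal demonstration that a duplicated `elif` arm is dead code:

```python
# A duplicated elif condition is shadowed by the first occurrence.
def dispatch(stream_name):
    if stream_name == "receipts":
        return "receipt_key"
    elif stream_name == "receipts":  # never reached, as in the diff above
        return "groups_key"


assert dispatch("receipts") == "receipt_key"
```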
@@ -444,7 +405,9 @@ def start(config_options):
 
     assert config.worker_app == "synapse.app.synchrotron"
 
-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
     database_engine = create_engine(config.database_config)
 
@@ -460,33 +423,13 @@ def start(config_options):
     ss.setup()
     ss.start_listening(config.worker_listeners)
 
-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_datastore().start_profiling()
-        ss.replicate()
         ss.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
 
-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-synchrotron",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-synchrotron", config)
 
 
 if __name__ == '__main__':
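All three workers now end with a one-line `_base.start_worker_reactor(...)` call in place of their copy-pasted Daemonize blocks. A plausible sketch of that helper, assuming it simply pulls the `worker_*` options out of the config and defers to the `start_reactor` pattern sketched after the homeserver hunk above; the attribute names are the `worker_*` options visible in this diff:

```python
# Sketch only; start_reactor is the helper sketched earlier in this page.
def start_worker_reactor(appname, config):
    start_reactor(
        appname,
        config.soft_file_limit,
        config.gc_thresholds,
        config.worker_pid_file,
        config.worker_daemonize,
    )
```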
diff --git a/synctl b/synctl
--- a/synctl
+++ b/synctl
@@ -23,14 +23,27 @@ import signal
 import subprocess
 import sys
 import yaml
+import errno
+import time
 
 SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
 
 GREEN = "\x1b[1;32m"
+YELLOW = "\x1b[1;33m"
 RED = "\x1b[1;31m"
 NORMAL = "\x1b[m"
 
 
+def pid_running(pid):
+    try:
+        os.kill(pid, 0)
+        return True
+    except OSError as err:
+        if err.errno == errno.EPERM:
+            return True
+        return False
+
+
 def write(message, colour=NORMAL, stream=sys.stdout):
     if colour == NORMAL:
         stream.write(message + "\n")
@@ -38,6 +51,11 @@ def write(message, colour=NORMAL, stream=sys.stdout):
         stream.write(colour + message + NORMAL + "\n")
 
 
+def abort(message, colour=RED, stream=sys.stderr):
+    write(message, colour, stream)
+    sys.exit(1)
+
+
 def start(configfile):
     write("Starting ...")
     args = SYNAPSE
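The new `pid_running` helper relies on the classic `os.kill(pid, 0)` probe: signal 0 delivers nothing but still performs the existence and permission checks, so `EPERM` proves the process is alive even when we may not signal it. Usage:

```python
import errno
import os


def pid_running(pid):
    try:
        os.kill(pid, 0)  # signal 0: check only, nothing is delivered
        return True
    except OSError as err:
        # EPERM means the pid exists but belongs to another user.
        return err.errno == errno.EPERM


print(pid_running(os.getpid()))  # True: a process can always signal itself
```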
@@ -45,7 +63,8 @@ def start(configfile):
 
     try:
         subprocess.check_call(args)
-        write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
+        write("started synapse.app.homeserver(%r)" %
+              (configfile,), colour=GREEN)
     except subprocess.CalledProcessError as e:
         write(
             "error starting (exit code: %d); see above for logs" % e.returncode,
@@ -76,8 +95,16 @@ def start_worker(app, configfile, worker_configfile):
 def stop(pidfile, app):
     if os.path.exists(pidfile):
         pid = int(open(pidfile).read())
-        os.kill(pid, signal.SIGTERM)
-        write("stopped %s" % (app,), colour=GREEN)
+        try:
+            os.kill(pid, signal.SIGTERM)
+            write("stopped %s" % (app,), colour=GREEN)
+        except OSError as err:
+            if err.errno == errno.ESRCH:
+                write("%s not running" % (app,), colour=YELLOW)
+            elif err.errno == errno.EPERM:
+                abort("Cannot stop %s: Operation not permitted" % (app,))
+            else:
+                abort("Cannot stop %s: Unknown error" % (app,))
 
 
 Worker = collections.namedtuple("Worker", [
@@ -98,7 +125,7 @@ def main():
         "configfile",
         nargs="?",
         default="homeserver.yaml",
-        help="the homeserver config file, defaults to homserver.yaml",
+        help="the homeserver config file, defaults to homeserver.yaml",
     )
     parser.add_argument(
         "-w", "--worker",
@@ -157,6 +184,9 @@ def main():
         worker_configfiles.append(worker_configfile)
 
     if options.all_processes:
+        # To start the main synapse with -a you need to add a worker file
+        # with worker_app == "synapse.app.homeserver"
+        start_stop_synapse = False
         worker_configdir = options.all_processes
         if not os.path.isdir(worker_configdir):
             write(
@@ -173,9 +203,28 @@ def main():
         with open(worker_configfile) as stream:
             worker_config = yaml.load(stream)
         worker_app = worker_config["worker_app"]
-        worker_pidfile = worker_config["worker_pid_file"]
-        worker_daemonize = worker_config["worker_daemonize"]
+        if worker_app == "synapse.app.homeserver":
+            # We need to special case all of this to pick up options that may
+            # be set in the main config file or in this worker config file.
+            worker_pidfile = (
+                worker_config.get("pid_file")
+                or pidfile
+            )
+            worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
+            daemonize = worker_config.get("daemonize") or config.get("daemonize")
+            assert daemonize, "Main process must have daemonize set to true"
+
+            # The master process doesn't support using worker_* config.
+            for key in worker_config:
+                if key == "worker_app":  # But we allow worker_app
+                    continue
+                assert not key.startswith("worker_"), \
+                    "Main process cannot use worker_* config"
+        else:
+            worker_pidfile = worker_config["worker_pid_file"]
|
worker_daemonize = worker_config["worker_daemonize"]
|
||||||
assert worker_daemonize # TODO print something more user friendly
|
assert worker_daemonize, "In config %r: expected '%s' to be True" % (
|
||||||
|
worker_configfile, "worker_daemonize")
|
||||||
worker_cache_factor = worker_config.get("synctl_cache_factor")
|
worker_cache_factor = worker_config.get("synctl_cache_factor")
|
||||||
workers.append(Worker(
|
workers.append(Worker(
|
||||||
worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
|
worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
|
||||||
@@ -190,10 +239,26 @@ def main():
|
|||||||
if start_stop_synapse:
|
if start_stop_synapse:
|
||||||
stop(pidfile, "synapse.app.homeserver")
|
stop(pidfile, "synapse.app.homeserver")
|
||||||
|
|
||||||
# TODO: Wait for synapse to actually shutdown before starting it again
|
# Wait for synapse to actually shutdown before starting it again
|
||||||
|
if action == "restart":
|
||||||
|
running_pids = []
|
||||||
|
if start_stop_synapse and os.path.exists(pidfile):
|
||||||
|
running_pids.append(int(open(pidfile).read()))
|
||||||
|
for worker in workers:
|
||||||
|
if os.path.exists(worker.pidfile):
|
||||||
|
running_pids.append(int(open(worker.pidfile).read()))
|
||||||
|
if len(running_pids) > 0:
|
||||||
|
write("Waiting for process to exit before restarting...")
|
||||||
|
for running_pid in running_pids:
|
||||||
|
while pid_running(running_pid):
|
||||||
|
time.sleep(0.2)
|
||||||
|
write("All processes exited; now restarting...")
|
||||||
|
|
||||||
if action == "start" or action == "restart":
|
if action == "start" or action == "restart":
|
||||||
if start_stop_synapse:
|
if start_stop_synapse:
|
||||||
|
# Check if synapse is already running
|
||||||
|
if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
|
||||||
|
abort("synapse.app.homeserver already running")
|
||||||
start(configfile)
|
start(configfile)
|
||||||
|
|
||||||
for worker in workers:
|
for worker in workers:
|
||||||
|
|||||||
synapse/app/user_dir.py (new file, 232 lines)
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sys
+
+import synapse
+from synapse import events
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.http.server import JsonResource
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v2_alpha import user_directory
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.storage.user_directory import UserDirectoryStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, run_in_background
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor, defer
+from twisted.web.resource import NoResource
+
+logger = logging.getLogger("synapse.app.user_dir")
+
+
+class UserDirectorySlaveStore(
+    SlavedEventStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    SlavedClientIpStore,
+    UserDirectoryStore,
+    BaseSlavedStore,
+):
+    def __init__(self, db_conn, hs):
+        super(UserDirectorySlaveStore, self).__init__(db_conn, hs)
+
+        events_max = self._stream_id_gen.get_current_token()
+        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
+            db_conn, "current_state_delta_stream",
+            entity_column="room_id",
+            stream_column="stream_id",
+            max_value=events_max,  # As we share the stream id with events token
+            limit=1000,
+        )
+        self._curr_state_delta_stream_cache = StreamChangeCache(
+            "_curr_state_delta_stream_cache", min_curr_state_delta_id,
+            prefilled_cache=curr_state_delta_prefill,
+        )
+
+        self._current_state_delta_pos = events_max
+
+    def stream_positions(self):
+        result = super(UserDirectorySlaveStore, self).stream_positions()
+        result["current_state_deltas"] = self._current_state_delta_pos
+        return result
+
+    def process_replication_rows(self, stream_name, token, rows):
+        if stream_name == "current_state_deltas":
+            self._current_state_delta_pos = token
+            for row in rows:
+                self._curr_state_delta_stream_cache.entity_has_changed(
+                    row.room_id, token
+                )
+        return super(UserDirectorySlaveStore, self).process_replication_rows(
+            stream_name, token, rows
+        )
+
+
+class UserDirectoryServer(HomeServer):
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    user_directory.register_servlets(self, resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+                self.version_string,
+            )
+        )
+
+        logger.info("Synapse user_dir now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return UserDirectoryReplicationHandler(self)
+
+
+class UserDirectoryReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
+        self.user_directory = hs.get_user_directory_handler()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(UserDirectoryReplicationHandler, self).on_rdata(
+            stream_name, token, rows
+        )
+        if stream_name == "current_state_deltas":
+            run_in_background(self._notify_directory)
+
+    @defer.inlineCallbacks
+    def _notify_directory(self):
+        try:
+            yield self.user_directory.notify_new_event()
+        except Exception:
+            logger.exception("Error notifiying user directory of state update")
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse user directory", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.user_dir"
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    if config.update_user_directory:
+        sys.stderr.write(
+            "\nThe update_user_directory must be disabled in the main synapse process"
+            "\nbefore they can be run in a separate worker."
+            "\nPlease add ``update_user_directory: false`` to the main config"
+            "\n"
+        )
+        sys.exit(1)
+
+    # Force the pushers to start since they will be disabled in the main config
+    config.update_user_directory = True
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ps = UserDirectoryServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ps.setup()
+    ps.start_listening(config.worker_listeners)
+
+    def start():
+        ps.get_datastore().start_profiling()
+        ps.get_state_handler().start_caching()
+
+    reactor.callWhenRunning(start)
+
+    _base.start_worker_reactor("synapse-user-dir", config)
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
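The new worker follows synapse's slaved-store pattern: the store class mixes in the slaved caches it needs, `stream_positions` advertises the replication token it tracks, and `process_replication_rows` advances that token and invalidates the stream-change cache per row. The shape, heavily reduced (illustrative names, not the real base classes):

    # Hedged sketch of a replication-fed store.
    class ExampleSlavedStore(object):
        def __init__(self):
            self._pos = 0  # last "current_state_deltas" token seen

        def stream_positions(self):
            # tells the master where to resume each stream
            return {"current_state_deltas": self._pos}

        def process_replication_rows(self, stream_name, token, rows):
            if stream_name == "current_state_deltas":
                self._pos = token
                # per-row cache invalidation would happen here
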
@@ -13,12 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.api.constants import EventTypes
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.types import GroupID, get_domain_from_id
+
 from twisted.internet import defer

 import logging
 import re

+from six import string_types
+
 logger = logging.getLogger(__name__)
@@ -80,15 +84,19 @@ class ApplicationService(object):
     # values.
     NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

-    def __init__(self, token, url=None, namespaces=None, hs_token=None,
+    def __init__(self, token, hostname, url=None, namespaces=None, hs_token=None,
                  sender=None, id=None, protocols=None, rate_limited=True):
         self.token = token
         self.url = url
         self.hs_token = hs_token
         self.sender = sender
+        self.server_name = hostname
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id

+        if "|" in self.id:
+            raise Exception("application service ID cannot contain '|' character")
+
         # .protocols is a publicly visible field
         if protocols:
             self.protocols = set(protocols)
@@ -121,29 +129,41 @@ class ApplicationService(object):
                     raise ValueError(
                         "Expected bool for 'exclusive' in ns '%s'" % ns
                     )
-                if not isinstance(regex_obj.get("regex"), basestring):
+                group_id = regex_obj.get("group_id")
+                if group_id:
+                    if not isinstance(group_id, str):
+                        raise ValueError(
+                            "Expected string for 'group_id' in ns '%s'" % ns
+                        )
+                    try:
+                        GroupID.from_string(group_id)
+                    except Exception:
+                        raise ValueError(
+                            "Expected valid group ID for 'group_id' in ns '%s'" % ns
+                        )
+
+                    if get_domain_from_id(group_id) != self.server_name:
+                        raise ValueError(
+                            "Expected 'group_id' to be this host in ns '%s'" % ns
+                        )
+
+                regex = regex_obj.get("regex")
+                if isinstance(regex, string_types):
+                    regex_obj["regex"] = re.compile(regex)  # Pre-compile regex
+                else:
                     raise ValueError(
                         "Expected string for 'regex' in ns '%s'" % ns
                     )
         return namespaces

-    def _matches_regex(self, test_string, namespace_key, return_obj=False):
-        if not isinstance(test_string, basestring):
-            logger.error(
-                "Expected a string to test regex against, but got %s",
-                test_string
-            )
-            return False
-
+    def _matches_regex(self, test_string, namespace_key):
         for regex_obj in self.namespaces[namespace_key]:
-            if re.match(regex_obj["regex"], test_string):
-                if return_obj:
-                    return regex_obj
-                return True
-        return False
+            if regex_obj["regex"].match(test_string):
+                return regex_obj
+        return None

     def _is_exclusive(self, ns_key, test_string):
-        regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
+        regex_obj = self._matches_regex(test_string, ns_key)
         if regex_obj:
             return regex_obj["exclusive"]
         return False
@@ -163,7 +183,14 @@ class ApplicationService(object):
         if not store:
             defer.returnValue(False)

-        member_list = yield store.get_users_in_room(event.room_id)
+        does_match = yield self._matches_user_in_member_list(event.room_id, store)
+        defer.returnValue(does_match)
+
+    @cachedInlineCallbacks(num_args=1, cache_context=True)
+    def _matches_user_in_member_list(self, room_id, store, cache_context):
+        member_list = yield store.get_users_in_room(
+            room_id, on_invalidate=cache_context.invalidate
+        )

         # check joined member events
         for user_id in member_list:
@@ -216,10 +243,10 @@ class ApplicationService(object):
         )

     def is_interested_in_alias(self, alias):
-        return self._matches_regex(alias, ApplicationService.NS_ALIASES)
+        return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES))

     def is_interested_in_room(self, room_id):
-        return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
+        return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS))

     def is_exclusive_user(self, user_id):
         return (
@@ -236,6 +263,31 @@ class ApplicationService(object):
     def is_exclusive_room(self, room_id):
         return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

+    def get_exlusive_user_regexes(self):
+        """Get the list of regexes used to determine if a user is exclusively
+        registered by the AS
+        """
+        return [
+            regex_obj["regex"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if regex_obj["exclusive"]
+        ]
+
+    def get_groups_for_user(self, user_id):
+        """Get the groups that this user is associated with by this AS
+
+        Args:
+            user_id (str): The ID of the user.
+
+        Returns:
+            iterable[str]: an iterable that yields group_id strings.
+        """
+        return (
+            regex_obj["group_id"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if "group_id" in regex_obj and regex_obj["regex"].match(user_id)
+        )
+
     def is_rate_limited(self):
         return self.rate_limited
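The `_matches_user_in_member_list` change shows the `cache_context` feature of synapse's cache descriptors: the outer cached method passes `cache_context.invalidate` to the inner cached call, so when the room-membership cache entry is invalidated the outer entry is evicted with it. The usage, restated from the hunk with explanatory comments:

    # Pattern as added above: chain invalidation from an inner cache to an
    # outer one (decorator semantics as in synapse at the time).
    @cachedInlineCallbacks(num_args=1, cache_context=True)
    def _matches_user_in_member_list(self, room_id, store, cache_context):
        member_list = yield store.get_users_in_room(
            room_id,
            on_invalidate=cache_context.invalidate,  # evict this entry too
        )
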
@@ -19,6 +19,7 @@ from synapse.api.errors import CodeMessageException
 from synapse.http.client import SimpleHttpClient
 from synapse.events.utils import serialize_event
 from synapse.util.caches.response_cache import ResponseCache
+from synapse.types import ThirdPartyInstanceID

 import logging
 import urllib
@@ -71,7 +72,8 @@ class ApplicationServiceApi(SimpleHttpClient):
         super(ApplicationServiceApi, self).__init__(hs)
         self.clock = hs.get_clock()

-        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
+        self.protocol_meta_cache = ResponseCache(hs, "as_protocol_meta",
+                                                 timeout_ms=HOUR_IN_MS)

     @defer.inlineCallbacks
     def query_user(self, service, user_id):
@@ -177,6 +179,13 @@ class ApplicationServiceApi(SimpleHttpClient):
                                    " valid result", uri)
                    defer.returnValue(None)

+                for instance in info.get("instances", []):
+                    network_id = instance.get("network_id", None)
+                    if network_id is not None:
+                        instance["instance_id"] = ThirdPartyInstanceID(
+                            service.id, network_id,
+                        ).to_string()
+
                 defer.returnValue(info)
             except Exception as ex:
                 logger.warning("query_3pe_protocol to %s threw exception %s",
@@ -184,9 +193,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 defer.returnValue(None)

         key = (service.id, protocol)
-        return self.protocol_meta_cache.get(key) or (
-            self.protocol_meta_cache.set(key, _get())
-        )
+        return self.protocol_meta_cache.wrap(key, _get)

     @defer.inlineCallbacks
     def push_bulk(self, service, events, txn_id=None):
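`ResponseCache.wrap` replaces the racy `get(key) or set(key, _get())` idiom: with `wrap`, the first caller kicks off `_get()` and later callers share the same in-flight Deferred instead of starting a second request. A hedged sketch of the deduplication it provides, assuming a dict-like cache of Deferreds and no timeout handling:

    def wrap(cache, key, callback):
        result = cache.get(key)
        if result is None:
            result = callback()      # start the request exactly once
            cache.set(key, result)   # concurrent callers reuse this Deferred
        return result
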
@@ -51,7 +51,7 @@ components.
 from twisted.internet import defer

 from synapse.appservice import ApplicationServiceState
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import Measure

 import logging
@@ -106,7 +106,7 @@ class _ServiceQueuer(object):
     def enqueue(self, service, event):
         # if this service isn't being sent something
         self.queued_events.setdefault(service.id, []).append(event)
-        preserve_fn(self._send_request)(service)
+        run_in_background(self._send_request, service)

     @defer.inlineCallbacks
     def _send_request(self, service):
@@ -123,7 +123,7 @@ class _ServiceQueuer(object):
             with Measure(self.clock, "servicequeuer.send"):
                 try:
                     yield self.txn_ctrl.send(service, events)
-                except:
+                except Exception:
                     logger.exception("AS request failed")
                 finally:
                     self.requests_in_flight.discard(service.id)
@@ -152,10 +152,10 @@ class _TransactionController(object):
             if sent:
                 yield txn.complete(self.store)
             else:
-                preserve_fn(self._start_recoverer)(service)
-        except Exception as e:
-            logger.exception(e)
-            preserve_fn(self._start_recoverer)(service)
+                run_in_background(self._start_recoverer, service)
+        except Exception:
+            logger.exception("Error creating appservice transaction")
+            run_in_background(self._start_recoverer, service)

     @defer.inlineCallbacks
     def on_recovered(self, recoverer):
@@ -176,6 +176,7 @@ class _TransactionController(object):

     @defer.inlineCallbacks
     def _start_recoverer(self, service):
+        try:
             yield self.store.set_appservice_state(
                 service,
                 ApplicationServiceState.DOWN
@@ -187,6 +188,8 @@ class _TransactionController(object):
             recoverer = self.recoverer_fn(service, self.on_recovered)
             self.add_recoverers([recoverer])
             recoverer.recover()
+        except Exception:
+            logger.exception("Error starting AS recoverer")

     @defer.inlineCallbacks
     def _is_service_up(self, service):
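`run_in_background(f, *args)` is the newer spelling of `preserve_fn(f)(*args)`: both start `f` without yielding on it while keeping the current logging context attached to the background work.

    # Rough equivalence as used in this hunk (synapse.util.logcontext):
    #   preserve_fn(self._send_request)(service)        # old
    #   run_in_background(self._send_request, service)  # new
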
@@ -19,6 +19,8 @@ import os
 import yaml
 from textwrap import dedent

+from six import integer_types
+

 class ConfigError(Exception):
     pass
@@ -49,7 +51,7 @@ Missing mandatory `server_name` config option.
 class Config(object):
     @staticmethod
     def parse_size(value):
-        if isinstance(value, int) or isinstance(value, long):
+        if isinstance(value, integer_types):
             return value
         sizes = {"K": 1024, "M": 1024 * 1024}
         size = 1
@@ -61,14 +63,15 @@ class Config(object):

     @staticmethod
     def parse_duration(value):
-        if isinstance(value, int) or isinstance(value, long):
+        if isinstance(value, integer_types):
             return value
         second = 1000
-        hour = 60 * 60 * second
+        minute = 60 * second
+        hour = 60 * minute
         day = 24 * hour
         week = 7 * day
         year = 365 * day
-        sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
+        sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
         size = 1
         suffix = value[-1]
         if suffix in sizes:
@@ -80,22 +83,38 @@
     def abspath(file_path):
         return os.path.abspath(file_path) if file_path else file_path

+    @classmethod
+    def path_exists(cls, file_path):
+        """Check if a file exists
+
+        Unlike os.path.exists, this throws an exception if there is an error
+        checking if the file exists (for example, if there is a perms error on
+        the parent dir).
+
+        Returns:
+            bool: True if the file exists; False if not.
+        """
+        try:
+            os.stat(file_path)
+            return True
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise e
+            return False
+
     @classmethod
     def check_file(cls, file_path, config_name):
         if file_path is None:
             raise ConfigError(
                 "Missing config for %s."
-                " You must specify a path for the config file. You can "
-                "do this with the -c or --config-path option. "
-                "Adding --generate-config along with --server-name "
-                "<server name> will generate a config file at the given path."
                 % (config_name,)
             )
-        if not os.path.exists(file_path):
+        try:
+            os.stat(file_path)
+        except OSError as e:
             raise ConfigError(
-                "File %s config for %s doesn't exist."
-                " Try running again with --generate-config"
-                % (file_path, config_name,)
+                "Error accessing file '%s' (config for %s): %s"
+                % (file_path, config_name, e.strerror)
             )
         return cls.abspath(file_path)

@@ -247,7 +266,7 @@ class Config(object):
                     " -c CONFIG-FILE\""
                 )
             (config_path,) = config_files
-            if not os.path.exists(config_path):
+            if not cls.path_exists(config_path):
                 if config_args.keys_directory:
                     config_dir_path = config_args.keys_directory
                 else:
@@ -260,33 +279,33 @@ class Config(object):
                         "Must specify a server_name to a generate config for."
                         " Pass -H server.name."
                     )
-                if not os.path.exists(config_dir_path):
+                if not cls.path_exists(config_dir_path):
                     os.makedirs(config_dir_path)
-                with open(config_path, "wb") as config_file:
-                    config_bytes, config = obj.generate_config(
+                with open(config_path, "w") as config_file:
+                    config_str, config = obj.generate_config(
                         config_dir_path=config_dir_path,
                         server_name=server_name,
                         report_stats=(config_args.report_stats == "yes"),
                         is_generating_file=True
                     )
                     obj.invoke_all("generate_files", config)
-                    config_file.write(config_bytes)
-                print (
+                    config_file.write(config_str)
+                print((
                     "A config file has been generated in %r for server name"
                     " %r with corresponding SSL keys and self-signed"
                     " certificates. Please review this file and customise it"
                     " to your needs."
-                ) % (config_path, server_name)
+                ) % (config_path, server_name))
                 print(
                     "If this server name is incorrect, you will need to"
                     " regenerate the SSL certificates"
                 )
                 return
             else:
-                print (
+                print((
                     "Config file %r already exists. Generating any missing key"
                     " files."
-                ) % (config_path,)
+                ) % (config_path,))
                 generate_keys = True

         parser = argparse.ArgumentParser(
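With the added `minute` unit, `parse_duration` accepts s/m/h/d/w/y suffixes and returns milliseconds; bare integers pass through unchanged. Worked examples:

    # Config.parse_duration return values (milliseconds):
    #   parse_duration(500)   -> 500                       (int passthrough)
    #   parse_duration("10s") -> 10 * 1000          = 10000
    #   parse_duration("10m") -> 10 * 60 * 1000     = 600000   (new unit)
    #   parse_duration("2h")  -> 2 * 60 * 60 * 1000 = 7200000
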
@@ -17,10 +17,12 @@ from ._base import Config, ConfigError
 from synapse.appservice import ApplicationService
 from synapse.types import UserID

-import urllib
 import yaml
 import logging

+from six import string_types
+from six.moves.urllib import parse as urlparse
+
 logger = logging.getLogger(__name__)


@@ -89,21 +91,21 @@ def _load_appservice(hostname, as_info, config_filename):
         "id", "as_token", "hs_token", "sender_localpart"
     ]
     for field in required_string_fields:
-        if not isinstance(as_info.get(field), basestring):
+        if not isinstance(as_info.get(field), string_types):
             raise KeyError("Required string field: '%s' (%s)" % (
                 field, config_filename,
             ))

     # 'url' must either be a string or explicitly null, not missing
     # to avoid accidentally turning off push for ASes.
-    if (not isinstance(as_info.get("url"), basestring) and
+    if (not isinstance(as_info.get("url"), string_types) and
             as_info.get("url", "") is not None):
         raise KeyError(
             "Required string field or explicit null: 'url' (%s)" % (config_filename,)
         )

     localpart = as_info["sender_localpart"]
-    if urllib.quote(localpart) != localpart:
+    if urlparse.quote(localpart) != localpart:
         raise ValueError(
             "sender_localpart needs characters which are not URL encoded."
         )
@@ -128,7 +130,7 @@ def _load_appservice(hostname, as_info, config_filename):
                     "Expected namespace entry in %s to be an object,"
                     " but got %s", ns, regex_obj
                 )
-            if not isinstance(regex_obj.get("regex"), basestring):
+            if not isinstance(regex_obj.get("regex"), string_types):
                 raise ValueError(
                     "Missing/bad type 'regex' key in %s", regex_obj
                 )
@@ -154,6 +156,7 @@ def _load_appservice(hostname, as_info, config_filename):
     )
     return ApplicationService(
         token=as_info["as_token"],
+        hostname=hostname,
         url=as_info["url"],
         namespaces=as_info["namespaces"],
         hs_token=as_info["hs_token"],
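Swapping `basestring` for `six.string_types` (and `urllib.quote` for `six.moves.urllib.parse.quote`) keeps these checks working on both Python 2, where `string_types` is `(str, unicode)`, and Python 3, where it is `(str,)`:

    # Both assertions hold on py2 and py3:
    from six import string_types
    assert isinstance("bridge", string_types)
    assert isinstance(u"bridge", string_types)
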
@@ -41,7 +41,7 @@ class CasConfig(Config):
         #cas_config:
         #   enabled: true
         #   server_url: "https://cas-server.com"
-        #   service_url: "https://homesever.domain.com:8448"
+        #   service_url: "https://homeserver.domain.com:8448"
         #   #required_attributes:
         #   #    name: value
         """
@@ -68,6 +68,18 @@ class EmailConfig(Config):
             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
             )
+            self.email_riot_base_url = email_config.get(
+                "riot_base_url", None
+            )
+            self.email_smtp_user = email_config.get(
+                "smtp_user", None
+            )
+            self.email_smtp_pass = email_config.get(
+                "smtp_pass", None
+            )
+            self.require_transport_security = email_config.get(
+                "require_transport_security", False
+            )
             if "app_name" in email_config:
                 self.email_app_name = email_config["app_name"]
             else:
@@ -85,14 +97,25 @@ class EmailConfig(Config):
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable sending emails for notification events
+        # Defining a custom URL for Riot is only needed if email notifications
+        # should contain links to a self-hosted installation of Riot; when set
+        # the "app_name" setting is ignored.
+        #
+        # If your SMTP server requires authentication, the optional smtp_user &
+        # smtp_pass variables should be used
+        #
         #email:
         #   enable_notifs: false
         #   smtp_host: "localhost"
         #   smtp_port: 25
+        #   smtp_user: "exampleusername"
+        #   smtp_pass: "examplepassword"
+        #   require_transport_security: False
        #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
        #   app_name: Matrix
        #   template_dir: res/templates
        #   notif_template_html: notif_mail.html
        #   notif_template_text: notif_mail.txt
        #   notif_for_new_users: True
+        #   riot_base_url: "http://localhost/riot"
         """
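The new `smtp_user`/`smtp_pass`/`require_transport_security` options describe standard SMTP authentication. A hedged sketch of how such options are typically consumed, using stdlib smtplib (synapse's actual mailer is twisted-based and lives elsewhere):

    import smtplib

    def connect(host, port, user, password, require_tls):
        conn = smtplib.SMTP(host, port)
        if require_tls:
            conn.starttls()            # refuse to send credentials in the clear
        if user:
            conn.login(user, password)
        return conn
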
synapse/config/groups.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class GroupsConfig(Config):
+    def read_config(self, config):
+        self.enable_group_creation = config.get("enable_group_creation", False)
+        self.group_creation_prefix = config.get("group_creation_prefix", "")
+
+    def default_config(self, **kwargs):
+        return """\
+        # Whether to allow non server admins to create groups on this server
+        enable_group_creation: false
+
+        # If enabled, non server admins can only create groups with local parts
+        # starting with this prefix
+        # group_creation_prefix: "unofficial/"
+        """
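Like every Config mixin, `GroupsConfig` contributes a `read_config` that turns the loaded YAML into attributes and a `default_config` that emits the commented sample. A hypothetical consumer of the two new options (function name illustrative, not synapse's real handler):

    def can_create_group(config, requester_is_admin, localpart):
        # admins bypass both checks; others need the feature enabled and
        # a localpart under the configured prefix
        if requester_is_admin:
            return True
        if not config.enable_group_creation:
            return False
        return localpart.startswith(config.group_creation_prefix)
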
@@ -33,6 +33,10 @@ from .jwt import JWTConfig
 from .password_auth_providers import PasswordAuthProviderConfig
 from .emailconfig import EmailConfig
 from .workers import WorkerConfig
+from .push import PushConfig
+from .spam_checker import SpamCheckerConfig
+from .groups import GroupsConfig
+from .user_directory import UserDirectoryConfig


 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
@@ -40,7 +44,8 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                        JWTConfig, PasswordConfig, EmailConfig,
-                       WorkerConfig, PasswordAuthProviderConfig,):
+                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
+                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,):
     pass
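`HomeServerConfig` is assembled purely by multiple inheritance: registering a config section means adding its class to the bases, and `invoke_all` walks the MRO calling each class's method of a given name. The mechanism, reduced to a hedged core sketch:

    class Combined(object):
        def invoke_all(self, name, *args):
            results = []
            for cls in type(self).mro():
                if name in cls.__dict__:      # each mixin's own method, once
                    results.append(getattr(cls, name)(self, *args))
            return results
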
@@ -118,10 +118,9 @@ class KeyConfig(Config):
         signing_keys = self.read_file(signing_key_path, "signing_key")
         try:
             return read_signing_keys(signing_keys.splitlines(True))
-        except Exception:
+        except Exception as e:
             raise ConfigError(
-                "Error reading signing_key."
-                " Try running again with --generate-config"
+                "Error reading signing_key: %s" % (str(e))
             )

     def read_old_signing_keys(self, old_signing_keys):
@@ -141,7 +140,8 @@ class KeyConfig(Config):

     def generate_files(self, config):
         signing_key_path = config["signing_key_path"]
-        if not os.path.exists(signing_key_path):
+
+        if not self.path_exists(signing_key_path):
             with open(signing_key_path, "w") as signing_key_file:
                 key_id = "a_" + random_string(4)
                 write_signing_keys(
@@ -15,14 +15,13 @@

 from ._base import Config
 from synapse.util.logcontext import LoggingContextFilter
-from twisted.python.log import PythonLoggingObserver
+from twisted.logger import globalLogBeginner, STDLibLogObserver
 import logging
 import logging.config
 import yaml
 from string import Template
 import os
 import signal
-from synapse.util.debug import debug_deferreds


 DEFAULT_LOG_CONFIG = Template("""
@@ -30,8 +29,8 @@ version: 1

 formatters:
   precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s\
-- %(message)s'
+   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
+%(request)s - %(message)s'

 filters:
   context:
@@ -46,16 +45,18 @@ handlers:
     maxBytes: 104857600
     backupCount: 10
     filters: [context]
-    level: INFO
   console:
     class: logging.StreamHandler
    formatter: precise
+    filters: [context]

 loggers:
     synapse:
         level: INFO

     synapse.storage.SQL:
+        # beware: increasing this to DEBUG will make synapse log sensitive
+        # information such as access tokens.
         level: INFO

 root:
@@ -68,35 +69,24 @@ class LoggingConfig(Config):

     def read_config(self, config):
         self.verbosity = config.get("verbose", 0)
+        self.no_redirect_stdio = config.get("no_redirect_stdio", False)
         self.log_config = self.abspath(config.get("log_config"))
         self.log_file = self.abspath(config.get("log_file"))
-        if config.get("full_twisted_stacktraces"):
-            debug_deferreds()

     def default_config(self, config_dir_path, server_name, **kwargs):
-        log_file = self.abspath("homeserver.log")
         log_config = self.abspath(
             os.path.join(config_dir_path, server_name + ".log.config")
         )
         return """
-        # Logging verbosity level.
-        verbose: 0
-
-        # File to write logging to
-        log_file: "%(log_file)s"
-
         # A yaml python logging config file
         log_config: "%(log_config)s"
-
-        # Stop twisted from discarding the stack traces of exceptions in
-        # deferreds by waiting a reactor tick before running a deferred's
-        # callbacks.
-        # full_twisted_stacktraces: true
         """ % locals()

     def read_arguments(self, args):
         if args.verbose is not None:
             self.verbosity = args.verbose
+        if args.no_redirect_stdio is not None:
+            self.no_redirect_stdio = args.no_redirect_stdio
         if args.log_config is not None:
             self.log_config = args.log_config
         if args.log_file is not None:
@@ -106,48 +96,68 @@ class LoggingConfig(Config):
         logging_group = parser.add_argument_group("logging")
         logging_group.add_argument(
             '-v', '--verbose', dest="verbose", action='count',
-            help="The verbosity level."
+            help="The verbosity level. Specify multiple times to increase "
+            "verbosity. (Ignored if --log-config is specified.)"
         )
         logging_group.add_argument(
             '-f', '--log-file', dest="log_file",
-            help="File to log to."
+            help="File to log to. (Ignored if --log-config is specified.)"
         )
         logging_group.add_argument(
             '--log-config', dest="log_config", default=None,
             help="Python logging config file"
         )
+        logging_group.add_argument(
+            '-n', '--no-redirect-stdio',
+            action='store_true', default=None,
+            help="Do not redirect stdout/stderr to the log"
+        )

     def generate_files(self, config):
         log_config = config.get("log_config")
         if log_config and not os.path.exists(log_config):
-            with open(log_config, "wb") as log_config_file:
+            log_file = self.abspath("homeserver.log")
+            with open(log_config, "w") as log_config_file:
                 log_config_file.write(
-                    DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
+                    DEFAULT_LOG_CONFIG.substitute(log_file=log_file)
                 )

-    def setup_logging(self):
-        setup_logging(self.log_config, self.log_file, self.verbosity)

+def setup_logging(config, use_worker_options=False):
+    """ Set up python logging
+
+    Args:
+        config (LoggingConfig | synapse.config.workers.WorkerConfig):
+            configuration data
+
+        use_worker_options (bool): True to use 'worker_log_config' and
+            'worker_log_file' options instead of 'log_config' and 'log_file'.
+    """
+    log_config = (config.worker_log_config if use_worker_options
+                  else config.log_config)
+    log_file = (config.worker_log_file if use_worker_options
+                else config.log_file)

-def setup_logging(log_config=None, log_file=None, verbosity=None):
     log_format = (
         "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
         " - %(message)s"
     )
+
     if log_config is None:
+        # We don't have a logfile, so fall back to the 'verbosity' param from
+        # the config or cmdline. (Note that we generate a log config for new
+        # installs, so this will be an unusual case)
         level = logging.INFO
         level_for_storage = logging.INFO
-        if verbosity:
+        if config.verbosity:
             level = logging.DEBUG
-            if verbosity > 1:
+            if config.verbosity > 1:
                 level_for_storage = logging.DEBUG

-    # FIXME: we need a logging.WARN for a -q quiet option
     logger = logging.getLogger('')
     logger.setLevel(level)

-    logging.getLogger('synapse.storage').setLevel(level_for_storage)
+    logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

     formatter = logging.Formatter(log_format)
     if log_file:
@@ -160,6 +170,29 @@ def setup_logging(log_config=None, log_file=None, verbosity=None):
                 logger.info("Closing log file due to SIGHUP")
                 handler.doRollover()
                 logger.info("Opened new log file due to SIGHUP")
+        else:
+            handler = logging.StreamHandler()
+
+            def sighup(signum, stack):
+                pass
+
+        handler.setFormatter(formatter)
+
+        handler.addFilter(LoggingContextFilter(request=""))
+
+        logger.addHandler(handler)
+    else:
+        def load_log_config():
+            with open(log_config, 'r') as f:
+                logging.config.dictConfig(yaml.load(f))
+
+        def sighup(signum, stack):
+            # it might be better to use a file watcher or something for this.
+            logging.info("Reloading log config from %s due to SIGHUP",
+                         log_config)
+            load_log_config()
+
+        load_log_config()

     # TODO(paul): obviously this is a terrible mechanism for
     # stealing SIGHUP, because it means no other part of synapse
@@ -168,16 +201,19 @@ def setup_logging(log_config=None, log_file=None, verbosity=None):
     # it around.
     if getattr(signal, "SIGHUP"):
         signal.signal(signal.SIGHUP, sighup)
-    else:
-        handler = logging.StreamHandler()
-        handler.setFormatter(formatter)
-
-        handler.addFilter(LoggingContextFilter(request=""))
-        logger.addHandler(handler)
-    else:
-        with open(log_config, 'r') as f:
-            logging.config.dictConfig(yaml.load(f))

-    observer = PythonLoggingObserver()
-    observer.start()
+    # It's critical to point twisted's internal logging somewhere, otherwise it
+    # stacks up and leaks kup to 64K object;
+    # see: https://twistedmatrix.com/trac/ticket/8164
+    #
+    # Routing to the python logging framework could be a performance problem if
+    # the handlers blocked for a long time as python.logging is a blocking API
+    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
+    # filed as https://github.com/matrix-org/synapse/issues/1727
+    #
+    # However this may not be too much of a problem if we are just writing to a file.
+    observer = STDLibLogObserver()
+    globalLogBeginner.beginLoggingTo(
+        [observer],
+        redirectStandardIO=not config.no_redirect_stdio,
+    )
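The rewritten setup hands twisted's internal log stream to the stdlib via `STDLibLogObserver`, twisted's documented bridge into python logging. A minimal standalone version of the wiring (the real setup installs handlers and formatters first):

    import logging
    from twisted.logger import globalLogBeginner, STDLibLogObserver

    logging.basicConfig(level=logging.INFO)
    globalLogBeginner.beginLoggingTo(
        [STDLibLogObserver()],
        redirectStandardIO=False,   # mirrors the new no_redirect_stdio option
    )
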
synapse/config/password_auth_providers.py
@@ -15,37 +15,45 @@

 from ._base import Config

-import importlib
+from synapse.util.module_loader import load_module
+
+LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider'


 class PasswordAuthProviderConfig(Config):
     def read_config(self, config):
         self.password_providers = []
+        providers = []

         # We want to be backwards compatible with the old `ldap_config`
         # param.
         ldap_config = config.get("ldap_config", {})
-        self.ldap_enabled = ldap_config.get("enabled", False)
-        if self.ldap_enabled:
-            from synapse.util.ldap_auth_provider import LdapAuthProvider
-            parsed_config = LdapAuthProvider.parse_config(ldap_config)
-            self.password_providers.append((LdapAuthProvider, parsed_config))
+        if ldap_config.get("enabled", False):
+            providers.append({
+                'module': LDAP_PROVIDER,
+                'config': ldap_config,
+            })

-        providers = config.get("password_providers", [])
+        providers.extend(config.get("password_providers", []))
         for provider in providers:
-            # We need to import the module, and then pick the class out of
-            # that, so we split based on the last dot.
-            module, clz = provider['module'].rsplit(".", 1)
-            module = importlib.import_module(module)
-            provider_class = getattr(module, clz)
+            mod_name = provider['module']
+
+            # This is for backwards compat when the ldap auth provider resided
+            # in this package.
+            if mod_name == "synapse.util.ldap_auth_provider.LdapAuthProvider":
+                mod_name = LDAP_PROVIDER
+
+            (provider_class, provider_config) = load_module({
+                "module": mod_name,
+                "config": provider['config'],
+            })

-            provider_config = provider_class.parse_config(provider["config"])
             self.password_providers.append((provider_class, provider_config))

     def default_config(self, **kwargs):
         return """\
         # password_providers:
-        #     - module: "synapse.util.ldap_auth_provider.LdapAuthProvider"
+        #     - module: "ldap_auth_provider.LdapAuthProvider"
         #       config:
         #          enabled: true
         #          uri: "ldap://ldap.example.com:389"
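The refactor above moves the inline importlib logic into a shared synapse.util.module_loader.load_module helper. A sketch of what such a helper does, reconstructed from the importlib code the hunk removes (not copied from the helper itself):

    import importlib

    def load_module(provider):
        """Load a class from a dotted 'module' path and parse its config.

        Mirrors the removed code: split on the last dot, import the module,
        pick the class out of it, and let the class parse its own config
        section.
        """
        module_name, clz = provider['module'].rsplit(".", 1)
        module = importlib.import_module(module_name)
        provider_class = getattr(module, clz)
        provider_config = provider_class.parse_config(provider["config"])
        return provider_class, provider_config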
synapse/config/push.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class PushConfig(Config):
+    def read_config(self, config):
+        push_config = config.get("push", {})
+        self.push_include_content = push_config.get("include_content", True)
+
+        # There was a 'redact_content' setting, but it was mistakenly read
+        # from the 'email' section. Check for the flag in the 'push' section,
+        # and log, but do not honour it to avoid nasty surprises when people
+        # upgrade.
+        if push_config.get("redact_content") is not None:
+            print(
+                "The push.redact_content content option has never worked. "
+                "Please set push.include_content if you want this behaviour"
+            )
+
+        # Now check for the one in the 'email' section and honour it,
+        # with a warning.
+        push_config = config.get("email", {})
+        redact_content = push_config.get("redact_content")
+        if redact_content is not None:
+            print(
+                "The 'email.redact_content' option is deprecated: "
+                "please set push.include_content instead"
+            )
+            self.push_include_content = not redact_content
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Clients requesting push notifications can either have the body of
+        # the message sent in the notification poke along with other details
+        # like the sender, or just the event ID and room ID (`event_id_only`).
+        # If clients choose the former, this option controls whether the
+        # notification request includes the content of the event (other details
+        # like the sender are still included). For `event_id_only` push, it
+        # has no effect.
+
+        # For modern android devices the notification content will still appear
+        # because it is loaded by the app. iPhone, however, will send a
+        # notification saying only that a message arrived and who it came from.
+        #
+        #push:
+        #   include_content: true
+        """
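What include_content controls is the shape of the notification poke sent to the push gateway. A hypothetical sketch of that effect; the field names follow the Matrix push gateway API, and the actual payload construction lives in synapse's push code, not in this config class:

    def build_notification(event, include_content):
        """Illustrative only: omit the event content when the option is off."""
        notification = {
            "event_id": event["event_id"],
            "room_id": event["room_id"],
            "type": event["type"],
            "sender": event["sender"],
        }
        if include_content:
            # Only attach the message body etc. when the option allows it.
            notification["content"] = event["content"]
        return notification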
synapse/config/registration.py
@@ -31,8 +31,9 @@ class RegistrationConfig(Config):
             strtobool(str(config["disable_registration"]))
         )

+        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
+        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
         self.registration_shared_secret = config.get("registration_shared_secret")
-        self.user_creation_max_duration = int(config["user_creation_max_duration"])

         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
         self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
@@ -42,6 +43,8 @@ class RegistrationConfig(Config):
             self.allow_guest_access and config.get("invite_3pid_guest", False)
         )

+        self.auto_join_rooms = config.get("auto_join_rooms", [])
+
     def default_config(self, **kwargs):
         registration_shared_secret = random_string_with_symbols(50)

@@ -51,18 +54,32 @@ class RegistrationConfig(Config):
         # Enable registration for new users.
         enable_registration: False

+        # The user must provide all of the below types of 3PID when registering.
+        #
+        # registrations_require_3pid:
+        #     - email
+        #     - msisdn
+
+        # Mandate that users are only allowed to associate certain formats of
+        # 3PIDs with accounts on this server.
+        #
+        # allowed_local_3pids:
+        #     - medium: email
+        #       pattern: ".*@matrix\\.org"
+        #     - medium: email
+        #       pattern: ".*@vector\\.im"
+        #     - medium: msisdn
+        #       pattern: "\\+44"
+
         # If set, allows registration by anyone who also has the shared
         # secret, even if registration is otherwise disabled.
         registration_shared_secret: "%(registration_shared_secret)s"

-        # Sets the expiry for the short term user creation in
-        # milliseconds. For instance the below duration is two weeks
-        # in milliseconds.
-        user_creation_max_duration: 1209600000
-
         # Set the number of bcrypt rounds used to generate password hash.
         # Larger numbers increase the work factor needed to generate the hash.
-        # The default number of rounds is 12.
+        # The default number is 12 (which equates to 2^12 rounds).
+        # N.B. that increasing this will exponentially increase the time required
+        # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
         bcrypt_rounds: 12

         # Allows users to register as guests without a password/email/etc, and
@@ -75,6 +92,12 @@ class RegistrationConfig(Config):
         trusted_third_party_id_servers:
             - matrix.org
             - vector.im
+            - riot.im
+
+        # Users who register on this homeserver will automatically be joined
+        # to these rooms
+        #auto_join_rooms:
+        #    - "#example:example.com"
         """ % locals()

     def add_arguments(self, parser):
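The new bcrypt comment makes the cost explicit: the work factor is 2^rounds, so each increment roughly doubles hashing time. A small sketch demonstrating that with the bcrypt library (the assumption being that your environment has the `bcrypt` package installed):

    import time

    import bcrypt

    def time_hash(rounds):
        start = time.time()
        bcrypt.hashpw(b"correct horse battery staple", bcrypt.gensalt(rounds=rounds))
        return time.time() - start

    # Expect roughly a 2x jump per extra round.
    for rounds in (10, 11, 12, 13):
        print(rounds, "%.3fs" % time_hash(rounds))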
synapse/config/repository.py
@@ -16,6 +16,8 @@
 from ._base import Config, ConfigError
 from collections import namedtuple

+from synapse.util.module_loader import load_module
+

 MISSING_NETADDR = (
     "Missing netaddr library. This is required for URL preview API."
@@ -36,6 +38,14 @@ ThumbnailRequirement = namedtuple(
     "ThumbnailRequirement", ["width", "height", "method", "media_type"]
 )

+MediaStorageProviderConfig = namedtuple(
+    "MediaStorageProviderConfig", (
+        "store_local",  # Whether to store newly uploaded local files
+        "store_remote",  # Whether to store newly downloaded remote files
+        "store_synchronous",  # Whether to wait for successful storage for local uploads
+    ),
+)
+

 def parse_thumbnail_requirements(thumbnail_sizes):
     """ Takes a list of dictionaries with "width", "height", and "method" keys
@@ -70,7 +80,64 @@ class ContentRepositoryConfig(Config):
         self.max_upload_size = self.parse_size(config["max_upload_size"])
         self.max_image_pixels = self.parse_size(config["max_image_pixels"])
         self.max_spider_size = self.parse_size(config["max_spider_size"])

         self.media_store_path = self.ensure_directory(config["media_store_path"])

+        backup_media_store_path = config.get("backup_media_store_path")
+
+        synchronous_backup_media_store = config.get(
+            "synchronous_backup_media_store", False
+        )
+
+        storage_providers = config.get("media_storage_providers", [])
+
+        if backup_media_store_path:
+            if storage_providers:
+                raise ConfigError(
+                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
+                )
+
+            storage_providers = [{
+                "module": "file_system",
+                "store_local": True,
+                "store_synchronous": synchronous_backup_media_store,
+                "store_remote": True,
+                "config": {
+                    "directory": backup_media_store_path,
+                }
+            }]
+
+        # This is a list of config that can be used to create the storage
+        # providers. The entries are tuples of (Class, class_config,
+        # MediaStorageProviderConfig), where Class is the class of the provider,
+        # the class_config the config to pass to it, and
+        # MediaStorageProviderConfig are options for StorageProviderWrapper.
+        #
+        # We don't create the storage providers here as not all workers need
+        # them to be started.
+        self.media_storage_providers = []
+
+        for provider_config in storage_providers:
+            # We special case the module "file_system" so as not to need to
+            # expose FileStorageProviderBackend
+            if provider_config["module"] == "file_system":
+                provider_config["module"] = (
+                    "synapse.rest.media.v1.storage_provider"
+                    ".FileStorageProviderBackend"
+                )
+
+            provider_class, parsed_config = load_module(provider_config)
+
+            wrapper_config = MediaStorageProviderConfig(
+                provider_config.get("store_local", False),
+                provider_config.get("store_remote", False),
+                provider_config.get("store_synchronous", False),
+            )
+
+            self.media_storage_providers.append(
+                (provider_class, parsed_config, wrapper_config,)
+            )
+
         self.uploads_path = self.ensure_directory(config["uploads_path"])
         self.dynamic_thumbnails = config["dynamic_thumbnails"]
         self.thumbnail_requirements = parse_thumbnail_requirements(
@@ -115,6 +182,20 @@ class ContentRepositoryConfig(Config):
         # Directory where uploaded images and attachments are stored.
         media_store_path: "%(media_store)s"

+        # Media storage providers allow media to be stored in different
+        # locations.
+        # media_storage_providers:
+        # - module: file_system
+        #   # Whether to write new local files.
+        #   store_local: false
+        #   # Whether to write new remote media
+        #   store_remote: false
+        #   # Whether to block upload requests waiting for write to this
+        #   # provider to complete
+        #   store_synchronous: false
+        #   config:
+        #     directory: /mnt/some/other/directory
+
         # Directory where in-progress uploads are stored.
         uploads_path: "%(uploads_path)s"

@@ -167,6 +248,8 @@ class ContentRepositoryConfig(Config):
         # - '10.0.0.0/8'
         # - '172.16.0.0/12'
         # - '192.168.0.0/16'
+        # - '100.64.0.0/10'
+        # - '169.254.0.0/16'
         #
         # List of IP address CIDR ranges that the URL preview spider is allowed
         # to access even if they are specified in url_preview_ip_range_blacklist.
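A custom provider, as loaded by the hunk above, is just a class reachable by dotted path plus a parse_config hook. A minimal hypothetical sketch; the constructor signature and the store_file/fetch method names are assumptions about the StorageProviderWrapper interface, not taken from this diff:

    import os
    import shutil

    class MirrorStorageProvider(object):
        """Hypothetical provider that mirrors media files to another directory."""

        def __init__(self, hs, config):
            self.base_directory = config  # whatever parse_config returned

        @staticmethod
        def parse_config(config):
            # Called by load_module() with the provider's 'config' mapping.
            return config["directory"]

        def store_file(self, path, file_info):
            backup_fname = os.path.join(self.base_directory, path)
            dirname = os.path.dirname(backup_fname)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            shutil.copyfile(path, backup_fname)

        def fetch(self, path, file_info):
            # Return a readable for `path`, or None if this provider lacks it.
            backup_fname = os.path.join(self.base_directory, path)
            if os.path.exists(backup_fname):
                return open(backup_fname, "rb")
            return None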
synapse/config/server.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,6 +30,41 @@ class ServerConfig(Config):
         self.user_agent_suffix = config.get("user_agent_suffix")
         self.use_frozen_dicts = config.get("use_frozen_dicts", False)
         self.public_baseurl = config.get("public_baseurl")
+        self.cpu_affinity = config.get("cpu_affinity")
+
+        # Whether to send federation traffic out in this process. This only
+        # applies to some federation traffic, and so shouldn't be used to
+        # "disable" federation
+        self.send_federation = config.get("send_federation", True)
+
+        # Whether to update the user directory or not. This should be set to
+        # false only if we are updating the user directory in a worker
+        self.update_user_directory = config.get("update_user_directory", True)
+
+        # whether to enable the media repository endpoints. This should be set
+        # to false if the media repository is running as a separate endpoint;
+        # doing so ensures that we will not run cache cleanup jobs on the
+        # master, potentially causing inconsistency.
+        self.enable_media_repo = config.get("enable_media_repo", True)
+
+        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)
+
+        # Whether we should block invites sent to users on this server
+        # (other than those sent by local server admins)
+        self.block_non_admin_invites = config.get(
+            "block_non_admin_invites", False,
+        )
+
+        # FIXME: federation_domain_whitelist needs sytests
+        self.federation_domain_whitelist = None
+        federation_domain_whitelist = config.get(
+            "federation_domain_whitelist", None
+        )
+        # turn the whitelist into a hash for speed of lookup
+        if federation_domain_whitelist is not None:
+            self.federation_domain_whitelist = {}
+            for domain in federation_domain_whitelist:
+                self.federation_domain_whitelist[domain] = True
+
         if self.public_baseurl is not None:
             if self.public_baseurl[-1] != '/':
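The dict-of-True built at the end of that hunk exists purely for O(1) membership tests; a set gives the same behaviour. A short equivalent sketch (the domain names are placeholders):

    federation_domain_whitelist = {"lon.example.com", "nyc.example.com"}

    def is_domain_allowed(domain, whitelist):
        # None means "no whitelist configured": everything is allowed.
        return whitelist is None or domain in whitelist

    assert is_domain_allowed("lon.example.com", federation_domain_whitelist)
    assert not is_domain_allowed("evil.example.org", federation_domain_whitelist)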
@@ -37,6 +73,15 @@ class ServerConfig(Config):

         self.listeners = config.get("listeners", [])

+        for listener in self.listeners:
+            bind_address = listener.pop("bind_address", None)
+            bind_addresses = listener.setdefault("bind_addresses", [])
+
+            if bind_address:
+                bind_addresses.append(bind_address)
+            elif not bind_addresses:
+                bind_addresses.append('')
+
         self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

         bind_port = config.get("bind_port")
@@ -49,7 +94,7 @@ class ServerConfig(Config):

             self.listeners.append({
                 "port": bind_port,
-                "bind_address": bind_host,
+                "bind_addresses": [bind_host],
                 "tls": True,
                 "type": "http",
                 "resources": [
@@ -68,7 +113,7 @@ class ServerConfig(Config):
         if unsecure_port:
             self.listeners.append({
                 "port": unsecure_port,
-                "bind_address": bind_host,
+                "bind_addresses": [bind_host],
                 "tls": False,
                 "type": "http",
                 "resources": [
@@ -87,7 +132,7 @@ class ServerConfig(Config):
         if manhole:
             self.listeners.append({
                 "port": manhole,
-                "bind_address": "127.0.0.1",
+                "bind_addresses": ["127.0.0.1"],
                 "type": "manhole",
             })

@@ -95,7 +140,7 @@ class ServerConfig(Config):
         if metrics_port:
             self.listeners.append({
                 "port": metrics_port,
-                "bind_address": config.get("metrics_bind_host", "127.0.0.1"),
+                "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                 "tls": False,
                 "type": "http",
                 "resources": [
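The loop at the top of that hunk is a back-compat shim: old configs name a single `bind_address`, new ones a `bind_addresses` list. A standalone sketch of the normalisation with its three cases:

    def normalise_listener(listener):
        """Fold a legacy 'bind_address' key into 'bind_addresses'."""
        bind_address = listener.pop("bind_address", None)
        bind_addresses = listener.setdefault("bind_addresses", [])
        if bind_address:
            bind_addresses.append(bind_address)
        elif not bind_addresses:
            bind_addresses.append('')  # '' means "all interfaces" to twisted
        return listener

    assert normalise_listener({"bind_address": "::1"}) == {"bind_addresses": ["::1"]}
    assert normalise_listener({}) == {"bind_addresses": [""]}
    assert normalise_listener({"bind_addresses": ["1.2.3.4"]}) == {"bind_addresses": ["1.2.3.4"]}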
@@ -127,9 +172,36 @@ class ServerConfig(Config):
         # When running as a daemon, the file to store the pid in
         pid_file: %(pid_file)s

+        # CPU affinity mask. Setting this restricts the CPUs on which the
+        # process will be scheduled. It is represented as a bitmask, with the
+        # lowest order bit corresponding to the first logical CPU and the
+        # highest order bit corresponding to the last logical CPU. Not all CPUs
+        # may exist on a given system but a mask may specify more CPUs than are
+        # present.
+        #
+        # For example:
+        #    0x00000001 is processor #0,
+        #    0x00000003 is processors #0 and #1,
+        #    0xFFFFFFFF is all processors (#0 through #31).
+        #
+        # Pinning a Python process to a single CPU is desirable, because Python
+        # is inherently single-threaded due to the GIL, and can suffer a
+        # 30-40%% slowdown due to cache blow-out and thread context switching
+        # if the scheduler happens to schedule the underlying threads across
+        # different cores. See
+        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
+        #
+        # cpu_affinity: 0xFFFFFFFF
+
         # Whether to serve a web client from the HTTP/HTTPS root resource.
         web_client: True

+        # The root directory to serve for the above web client.
+        # If left undefined, synapse will serve the matrix-angular-sdk web client.
+        # Make sure matrix-angular-sdk is installed with pip if web_client is True
+        # and web_client_location is undefined
+        # web_client_location: "/path/to/web/root"
+
         # The public-facing base URL for the client API (not including _matrix/...)
         # public_baseurl: https://example.com:8448/
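The mask arithmetic in that comment is plain bit-twiddling. A sketch that expands a mask into a CPU set and applies it; os.sched_setaffinity is stdlib but Linux-only, and synapse itself used a separate affinity package, so treat the apply step as an assumption:

    import os

    def mask_to_cpus(mask):
        """0b101 -> {0, 2}: bit n set means logical CPU n is allowed."""
        cpus = set()
        n = 0
        while mask:
            if mask & 1:
                cpus.add(n)
            mask >>= 1
            n += 1
        return cpus

    assert mask_to_cpus(0x00000001) == {0}
    assert mask_to_cpus(0x00000003) == {0, 1}

    # Pin the current process to processor #0 (Linux only):
    # os.sched_setaffinity(0, mask_to_cpus(0x00000001))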
@@ -141,6 +213,25 @@ class ServerConfig(Config):
         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
         # gc_thresholds: [700, 10, 10]

+        # Set the limit on the returned events in the timeline in the get
+        # and sync operations. The default value is -1, which means no upper limit.
+        # filter_timeline_limit: 5000
+
+        # Whether room invites to users on this server should be blocked
+        # (except those sent by local server admins). The default is False.
+        # block_non_admin_invites: True
+
+        # Restrict federation to the following whitelist of domains.
+        # N.B. we recommend also firewalling your federation listener to limit
+        # inbound federation traffic as early as possible, rather than relying
+        # purely on this application-layer restriction. If not specified, the
+        # default is to whitelist everything.
+        #
+        # federation_domain_whitelist:
+        #  - lon.example.com
+        #  - nyc.example.com
+        #  - syd.example.com
+
         # List of ports that Synapse should listen on, their purpose and their
         # configuration.
         listeners:
@@ -150,9 +241,13 @@ class ServerConfig(Config):
             # The port to listen for HTTPS requests on.
             port: %(bind_port)s

-            # Local interface to listen on.
-            # The empty string will cause synapse to listen on all interfaces.
-            bind_address: ''
+            # Local addresses to listen on.
+            # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
+            # addresses by default. For most other OSes, this will only listen
+            # on IPv6.
+            bind_addresses:
+              - '::'
+              - '0.0.0.0'

             # This is a 'http' listener, allows us to specify 'resources'.
             type: http
@@ -179,11 +274,18 @@ class ServerConfig(Config):
               - names: [federation]  # Federation APIs
                 compress: false

+            # optional list of additional endpoints which can be loaded via
+            # dynamic modules
+            # additional_resources:
+            #   "/_matrix/my/custom/endpoint":
+            #     module: my_module.CustomRequestHandler
+            #     config: {}
+
           # Unsecure HTTP listener,
           # For when matrix traffic passes through loadbalancer that unwraps TLS.
           - port: %(unsecure_port)s
             tls: false
-            bind_address: ''
+            bind_addresses: ['::', '0.0.0.0']
             type: http

             x_forwarded: false
@@ -197,7 +299,7 @@ class ServerConfig(Config):
           # Turn on the twisted ssh manhole service on localhost on the given
           # port.
           # - port: 9000
-          #   bind_address: 127.0.0.1
+          #   bind_addresses: ['::1', '127.0.0.1']
          #   type: manhole
         """ % locals()

@@ -235,7 +337,7 @@ def read_gc_thresholds(thresholds):
         return (
             int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
         )
-    except:
+    except Exception:
         raise ConfigError(
             "Value of `gc_threshold` must be a list of three integers if set"
         )
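read_gc_thresholds feeds straight into the stdlib gc module. A sketch of the validation-plus-apply round trip, using the same error message as the diff but a plain ValueError in place of synapse's ConfigError:

    import gc

    def read_gc_thresholds(thresholds):
        """Read the three integer thresholds for garbage collection, or None."""
        if thresholds is None:
            return None
        try:
            assert len(thresholds) == 3
            return (int(thresholds[0]), int(thresholds[1]), int(thresholds[2]))
        except Exception:
            raise ValueError(
                "Value of `gc_threshold` must be a list of three integers if set"
            )

    thresholds = read_gc_thresholds([700, 10, 10])
    if thresholds is not None:
        gc.set_threshold(*thresholds)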
synapse/config/spam_checker.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.module_loader import load_module
+
+from ._base import Config
+
+
+class SpamCheckerConfig(Config):
+    def read_config(self, config):
+        self.spam_checker = None
+
+        provider = config.get("spam_checker", None)
+        if provider is not None:
+            self.spam_checker = load_module(provider)
+
+    def default_config(self, **kwargs):
+        return """\
+        # spam_checker:
+        #     module: "my_custom_project.SuperSpamChecker"
+        #     config:
+        #         example_option: 'things'
+        """
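A module loadable through that config needs only a dotted path and a config hook. A hypothetical checker follows; the check_event_for_spam hook name reflects the spam-checker interface of this era, but treat it as an assumption, since this config file only shows the loading side:

    class SuperSpamChecker(object):
        """Hypothetical spam checker: rejects events containing banned words."""

        def __init__(self, config):
            self.banned_words = set(config.get("banned_words", []))

        @staticmethod
        def parse_config(config):
            # load_module() calls this with the 'config' section of the YAML.
            return config

        def check_event_for_spam(self, event):
            body = event.get("content", {}).get("body", "")
            # Returning True tells the homeserver to reject the event.
            return any(word in body for word in self.banned_words)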
synapse/config/tls.py
@@ -95,8 +95,8 @@ class TlsConfig(Config):
         # make HTTPS requests to this server will check that the TLS
         # certificates returned by this server match one of the fingerprints.
         #
-        # Synapse automatically adds its the fingerprint of its own certificate
-        # to the list. So if federation traffic is handle directly by synapse
+        # Synapse automatically adds the fingerprint of its own certificate
+        # to the list. So if federation traffic is handled directly by synapse
         # then no modification to the list is required.
         #
         # If synapse is run behind a load balancer that handles the TLS then it
@@ -109,6 +109,12 @@ class TlsConfig(Config):
         # key. It may be necessary to publish the fingerprints of a new
         # certificate and wait until the "valid_until_ts" of the previous key
         # responses have passed before deploying it.
+        #
+        # You can calculate a fingerprint from a given TLS listener via:
+        # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
+        #   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
+        # or by checking matrix.org/federationtester/api/report?server_name=$host
+        #
         tls_fingerprints: []
         # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
         """ % locals()
@@ -126,8 +132,8 @@ class TlsConfig(Config):
         tls_private_key_path = config["tls_private_key_path"]
         tls_dh_params_path = config["tls_dh_params_path"]

-        if not os.path.exists(tls_private_key_path):
-            with open(tls_private_key_path, "w") as private_key_file:
+        if not self.path_exists(tls_private_key_path):
+            with open(tls_private_key_path, "wb") as private_key_file:
                 tls_private_key = crypto.PKey()
                 tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
                 private_key_pem = crypto.dump_privatekey(
@@ -141,8 +147,8 @@ class TlsConfig(Config):
                     crypto.FILETYPE_PEM, private_key_pem
                 )

-        if not os.path.exists(tls_certificate_path):
-            with open(tls_certificate_path, "w") as certificate_file:
+        if not self.path_exists(tls_certificate_path):
+            with open(tls_certificate_path, "wb") as certificate_file:
                 cert = crypto.X509()
                 subject = cert.get_subject()
                 subject.CN = config["server_name"]
@@ -159,7 +165,7 @@ class TlsConfig(Config):

             certificate_file.write(cert_pem)

-        if not os.path.exists(tls_dh_params_path):
+        if not self.path_exists(tls_dh_params_path):
             if GENERATE_DH_PARAMS:
                 subprocess.check_call([
                     "openssl", "dhparam",
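The openssl pipeline quoted in that new comment has a direct Python equivalent using only the stdlib; a sketch for computing the sha256 fingerprint of a live listener:

    import base64
    import hashlib
    import ssl

    def tls_fingerprint(host, port=8448):
        """sha256 fingerprint of a server's certificate, base64 sans padding."""
        pem = ssl.get_server_certificate((host, port))
        der = ssl.PEM_cert_to_DER_cert(pem)
        digest = hashlib.sha256(der).digest()
        # tr -d '=' in the shell version strips the base64 padding.
        return base64.b64encode(digest).decode("ascii").rstrip("=")

    # e.g. {"sha256": tls_fingerprint("matrix.org")} for the tls_fingerprints list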
synapse/config/user_directory.py (new file, 44 lines)
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class UserDirectoryConfig(Config):
+    """User Directory Configuration
+    Configuration for the behaviour of the /user_directory API
+    """
+
+    def read_config(self, config):
+        self.user_directory_search_all_users = False
+        user_directory_config = config.get("user_directory", None)
+        if user_directory_config:
+            self.user_directory_search_all_users = (
+                user_directory_config.get("search_all_users", False)
+            )
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # User Directory configuration
+        #
+        # 'search_all_users' defines whether to search all users visible to your HS
+        # when searching the user directory, rather than limiting to users visible
+        # in public rooms. Defaults to false. If you set it True, you'll have to run
+        # UPDATE user_directory_stream_pos SET stream_id = NULL;
+        # on your database to tell it to rebuild the user_directory search indexes.
+        #
+        #user_directory:
+        #   search_all_users: false
+        """
synapse/config/voip.py
@@ -19,8 +19,11 @@ class VoipConfig(Config):

     def read_config(self, config):
         self.turn_uris = config.get("turn_uris", [])
-        self.turn_shared_secret = config["turn_shared_secret"]
+        self.turn_shared_secret = config.get("turn_shared_secret")
+        self.turn_username = config.get("turn_username")
+        self.turn_password = config.get("turn_password")
         self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
+        self.turn_allow_guests = config.get("turn_allow_guests", True)

     def default_config(self, **kwargs):
         return """\
@@ -32,6 +35,18 @@ class VoipConfig(Config):
         # The shared secret used to compute passwords for the TURN server
         turn_shared_secret: "YOUR_SHARED_SECRET"

+        # The username and password if the TURN server needs them and
+        # does not use a token
+        #turn_username: "TURNSERVER_USERNAME"
+        #turn_password: "TURNSERVER_PASSWORD"
+
         # How long generated TURN credentials last
         turn_user_lifetime: "1h"

+        # Whether guests should be allowed to use the TURN server.
+        # This defaults to True, otherwise VoIP will be unreliable for guests.
+        # However, it does introduce a slight security risk as it allows users to
+        # connect to arbitrary endpoints without having first signed up for a
+        # valid account (e.g. by passing a CAPTCHA).
+        turn_allow_guests: True
         """
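The shared-secret scheme referenced here is coturn's use-auth-secret mode: the username encodes an expiry timestamp and the password is an HMAC over it, which is how the "generated TURN credentials" above come to have a lifetime. A sketch of that derivation; the exact "expiry:userid" username layout is an assumption in this sketch:

    import base64
    import hashlib
    import hmac
    import time

    def turn_credentials(user_id, shared_secret, lifetime_seconds=3600):
        expiry = int(time.time()) + lifetime_seconds
        username = "%d:%s" % (expiry, user_id)  # assumed layout: expiry:userid
        mac = hmac.new(
            shared_secret.encode("utf8"),
            msg=username.encode("utf8"),
            digestmod=hashlib.sha1,
        )
        password = base64.b64encode(mac.digest()).decode("ascii")
        return username, password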
synapse/config/workers.py
@@ -23,9 +23,37 @@ class WorkerConfig(Config):

     def read_config(self, config):
         self.worker_app = config.get("worker_app")
+
+        # Canonicalise worker_app so that master always has None
+        if self.worker_app == "synapse.app.homeserver":
+            self.worker_app = None
+
         self.worker_listeners = config.get("worker_listeners")
         self.worker_daemonize = config.get("worker_daemonize")
         self.worker_pid_file = config.get("worker_pid_file")
         self.worker_log_file = config.get("worker_log_file")
         self.worker_log_config = config.get("worker_log_config")
-        self.worker_replication_url = config.get("worker_replication_url")
+
+        # The host used to connect to the main synapse
+        self.worker_replication_host = config.get("worker_replication_host", None)
+
+        # The port on the main synapse for TCP replication
+        self.worker_replication_port = config.get("worker_replication_port", None)
+
+        # The port on the main synapse for HTTP replication endpoint
+        self.worker_replication_http_port = config.get("worker_replication_http_port")
+
+        self.worker_name = config.get("worker_name", self.worker_app)
+
+        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
+        self.worker_cpu_affinity = config.get("worker_cpu_affinity")
+
+        if self.worker_listeners:
+            for listener in self.worker_listeners:
+                bind_address = listener.pop("bind_address", None)
+                bind_addresses = listener.setdefault("bind_addresses", [])
+
+                if bind_address:
+                    bind_addresses.append(bind_address)
+                elif not bind_addresses:
+                    bind_addresses.append('')
synapse/crypto/context_factory.py
@@ -13,8 +13,8 @@
 # limitations under the License.

 from twisted.internet import ssl
-from OpenSSL import SSL
-from twisted.internet._sslverify import _OpenSSLECCurve, _defaultCurveName
+from OpenSSL import SSL, crypto
+from twisted.internet._sslverify import _defaultCurveName

 import logging

@@ -32,9 +32,10 @@ class ServerContextFactory(ssl.ContextFactory):
     @staticmethod
     def configure_context(context, config):
         try:
-            _ecCurve = _OpenSSLECCurve(_defaultCurveName)
-            _ecCurve.addECKeyToContext(context)
-        except:
+            _ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
+            context.set_tmp_ecdh(_ecCurve)
+        except Exception:
             logger.exception("Failed to enable elliptic curve for TLS")
         context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
         context.use_certificate_chain_file(config.tls_certificate_file)
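The replacement drops a private twisted helper in favour of pyOpenSSL's public API. A standalone sketch of enabling ECDHE on a fresh context the same way; prime256v1 is the curve twisted's _defaultCurveName points at:

    from OpenSSL import SSL, crypto

    context = SSL.Context(SSL.TLSv1_2_METHOD)

    # Enables ECDHE key exchange for this context.
    curve = crypto.get_elliptic_curve("prime256v1")
    context.set_tmp_ecdh(curve)

    context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)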
synapse/crypto/event_signing.py
@@ -32,18 +32,25 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     """Check whether the hash for this PDU matches the contents"""
     name, expected_hash = compute_content_hash(event, hash_algorithm)
     logger.debug("Expecting hash: %s", encode_base64(expected_hash))
-    if name not in event.hashes:
+
+    # some malformed events lack a 'hashes'. Protect against it being missing
+    # or a weird type by basically treating it the same as an unhashed event.
+    hashes = event.get("hashes")
+    if not isinstance(hashes, dict):
+        raise SynapseError(400, "Malformed 'hashes'", Codes.UNAUTHORIZED)
+
+    if name not in hashes:
         raise SynapseError(
             400,
             "Algorithm %s not in hashes %s" % (
-                name, list(event.hashes),
+                name, list(hashes),
             ),
             Codes.UNAUTHORIZED,
         )
-    message_hash_base64 = event.hashes[name]
+    message_hash_base64 = hashes[name]
     try:
         message_hash_bytes = decode_base64(message_hash_base64)
-    except:
+    except Exception:
         raise SynapseError(
             400,
             "Invalid base64: %s" % (message_hash_base64,),
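For reference, the content hash being compared is a sha256 over the event's canonical JSON with the volatile fields removed. A sketch of the computation; the exact set of stripped keys is an assumption here, and the canonicaljson package is the one synapse uses for signing:

    import hashlib

    from canonicaljson import encode_canonical_json

    def compute_content_hash(event_dict):
        event = dict(event_dict)
        # Strip fields that are not covered by the content hash.
        event.pop("signatures", None)
        event.pop("unsigned", None)
        event.pop("hashes", None)

        event_json = encode_canonical_json(event)
        return "sha256", hashlib.sha256(event_json).digest()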
synapse/crypto/keyclient.py
@@ -13,14 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from synapse.util import logcontext
 from twisted.web.http import HTTPClient
 from twisted.internet.protocol import Factory
 from twisted.internet import defer, reactor
 from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util.logcontext import (
-    preserve_context_over_fn, preserve_context_over_deferred
-)
 import simplejson as json
 import logging

@@ -43,14 +40,10 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):

     for i in range(5):
         try:
-            protocol = yield preserve_context_over_fn(
-                endpoint.connect, factory
-            )
-            server_response, server_certificate = yield preserve_context_over_deferred(
-                protocol.remote_key
-            )
+            with logcontext.PreserveLoggingContext():
+                protocol = yield endpoint.connect(factory)
+                server_response, server_certificate = yield protocol.remote_key
             defer.returnValue((server_response, server_certificate))
-            return
         except SynapseKeyClientError as e:
             logger.exception("Error getting key for %r" % (server_name,))
             if e.status.startswith("4"):
synapse/crypto/keyring.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,12 +16,11 @@

 from synapse.crypto.keyclient import fetch_server_key
 from synapse.api.errors import SynapseError, Codes
-from synapse.util.retryutils import get_retry_limiter
-from synapse.util import unwrapFirstError
-from synapse.util.async import ObservableDeferred
+from synapse.util import unwrapFirstError, logcontext
 from synapse.util.logcontext import (
-    preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
-    preserve_fn
+    PreserveLoggingContext,
+    preserve_fn,
+    run_in_background,
 )
 from synapse.util.metrics import Measure
@@ -58,7 +58,8 @@ Attributes:
     json_object(dict): The JSON object to verify.
     deferred(twisted.internet.defer.Deferred):
         A deferred (server_name, key_id, verify_key) tuple that resolves when
-        a verify key has been fetched
+        a verify key has been fetched. The deferreds' callbacks are run with no
+        logcontext.
 """
@@ -75,31 +76,41 @@ class Keyring(object):
         self.perspective_servers = self.config.perspectives
         self.hs = hs

+        # map from server name to Deferred. Has an entry for each server with
+        # an ongoing key download; the Deferred completes once the download
+        # completes.
+        #
+        # These are regular, logcontext-agnostic Deferreds.
         self.key_downloads = {}

     def verify_json_for_server(self, server_name, json_object):
-        return self.verify_json_objects_for_server(
-            [(server_name, json_object)]
-        )[0]
+        return logcontext.make_deferred_yieldable(
+            self.verify_json_objects_for_server(
+                [(server_name, json_object)]
+            )[0]
+        )

     def verify_json_objects_for_server(self, server_and_json):
-        """Bulk verfies signatures of json objects, bulk fetching keys as
+        """Bulk verifies signatures of json objects, bulk fetching keys as
         necessary.

         Args:
             server_and_json (list): List of pairs of (server_name, json_object)

         Returns:
-            list of deferreds indicating success or failure to verify each
-            json object's signature for the given server_name.
+            List<Deferred>: for each input pair, a deferred indicating success
+            or failure to verify each json object's signature for the given
+            server_name. The deferreds run their callbacks in the sentinel
+            logcontext.
         """
         verify_requests = []

         for server_name, json_object in server_and_json:
-            logger.debug("Verifying for %s", server_name)

             key_ids = signature_ids(json_object, server_name)
             if not key_ids:
+                logger.warn("Request from %s: no supported signature keys",
+                            server_name)
                 deferred = defer.fail(SynapseError(
                     400,
                     "Not signed with a supported algorithm",
@@ -108,76 +119,69 @@ class Keyring(object):
             else:
                 deferred = defer.Deferred()

+                logger.debug("Verifying for %s with key_ids %s",
+                             server_name, key_ids)
+
             verify_request = VerifyKeyRequest(
                 server_name, key_ids, json_object, deferred
             )

             verify_requests.append(verify_request)

+        run_in_background(self._start_key_lookups, verify_requests)
+
+        # Pass those keys to handle_key_deferred so that the json object
+        # signatures can be verified
+        handle = preserve_fn(_handle_key_deferred)
+        return [
+            handle(rq) for rq in verify_requests
+        ]
+
     @defer.inlineCallbacks
-    def handle_key_deferred(verify_request):
-        server_name = verify_request.server_name
-        try:
-            _, key_id, verify_key = yield verify_request.deferred
-        except IOError as e:
-            logger.warn(
-                "Got IOError when downloading keys for %s: %s %s",
-                server_name, type(e).__name__, str(e.message),
-            )
-            raise SynapseError(
-                502,
-                "Error downloading keys for %s" % (server_name,),
-                Codes.UNAUTHORIZED,
-            )
-        except Exception as e:
-            logger.exception(
-                "Got Exception when downloading keys for %s: %s %s",
-                server_name, type(e).__name__, str(e.message),
-            )
-            raise SynapseError(
-                401,
-                "No key for %s with id %s" % (server_name, key_ids),
-                Codes.UNAUTHORIZED,
-            )
-
-        json_object = verify_request.json_object
-
-        try:
-            verify_signed_json(json_object, server_name, verify_key)
-        except:
-            raise SynapseError(
-                401,
-                "Invalid signature for server %s with key %s:%s" % (
-                    server_name, verify_key.alg, verify_key.version
-                ),
-                Codes.UNAUTHORIZED,
-            )
-
-        server_to_deferred = {
-            server_name: defer.Deferred()
-            for server_name, _ in server_and_json
-        }
-
-        with PreserveLoggingContext():
+    def _start_key_lookups(self, verify_requests):
+        """Sets off the key fetches for each verify request
+
+        Once each fetch completes, verify_request.deferred will be resolved.
+
+        Args:
+            verify_requests (List[VerifyKeyRequest]):
+        """
+        try:
+            # create a deferred for each server we're going to look up the keys
+            # for; we'll resolve them once we have completed our lookups.
+            # These will be passed into wait_for_previous_lookups to block
+            # any other lookups until we have finished.
+            # The deferreds are called with no logcontext.
+            server_to_deferred = {
+                rq.server_name: defer.Deferred()
+                for rq in verify_requests
+            }

             # We want to wait for any previous lookups to complete before
             # proceeding.
-            wait_on_deferred = self.wait_for_previous_lookups(
-                [server_name for server_name, _ in server_and_json],
+            yield self.wait_for_previous_lookups(
+                [rq.server_name for rq in verify_requests],
                 server_to_deferred,
             )

             # Actually start fetching keys.
-            wait_on_deferred.addBoth(
-                lambda _: self.get_server_verify_keys(verify_requests)
-            )
+            self._get_server_verify_keys(verify_requests)

             # When we've finished fetching all the keys for a given server_name,
             # resolve the deferred passed to `wait_for_previous_lookups` so that
             # any lookups waiting will proceed.
+            #
+            # map from server name to a set of request ids
             server_to_request_ids = {}

-            def remove_deferreds(res, server_name, verify_request):
+            for verify_request in verify_requests:
+                server_name = verify_request.server_name
+                request_id = id(verify_request)
+                server_to_request_ids.setdefault(server_name, set()).add(request_id)
+
+            def remove_deferreds(res, verify_request):
+                server_name = verify_request.server_name
                 request_id = id(verify_request)
                 server_to_request_ids[server_name].discard(request_id)
                 if not server_to_request_ids[server_name]:
@@ -187,17 +191,11 @@ class Keyring(object):
                 return res

             for verify_request in verify_requests:
-                server_name = verify_request.server_name
-                request_id = id(verify_request)
-                server_to_request_ids.setdefault(server_name, set()).add(request_id)
-                deferred.addBoth(remove_deferreds, server_name, verify_request)
-
-        # Pass those keys to handle_key_deferred so that the json object
-        # signatures can be verified
-        return [
-            preserve_context_over_fn(handle_key_deferred, verify_request)
-            for verify_request in verify_requests
-        ]
+                verify_request.deferred.addBoth(
+                    remove_deferreds, verify_request,
+                )
+        except Exception:
+            logger.exception("Error starting key lookups")

     @defer.inlineCallbacks
     def wait_for_previous_lookups(self, server_names, server_to_deferred):
@@ -206,7 +204,13 @@ class Keyring(object):
         Args:
             server_names (list): list of server_names we want to lookup
             server_to_deferred (dict): server_name to deferred which gets
-                resolved once we've finished looking up keys for that server
+                resolved once we've finished looking up keys for that server.
+                The Deferreds should be regular twisted ones which call their
+                callbacks with no logcontext.
+
+        Returns: a Deferred which resolves once all key lookups for the given
+            servers have completed. Follows the synapse rules of logcontext
+            preservation.
         """
         while True:
             wait_on = [
@@ -220,19 +224,23 @@ class Keyring(object):
             else:
                 break

-        for server_name, deferred in server_to_deferred.items():
-            d = ObservableDeferred(preserve_context_over_deferred(deferred))
-            self.key_downloads[server_name] = d
-
-            def rm(r, server_name):
-                self.key_downloads.pop(server_name, None)
-                return r
-
-            d.addBoth(rm, server_name)
+        def rm(r, server_name_):
+            self.key_downloads.pop(server_name_, None)
+            return r
+
+        for server_name, deferred in server_to_deferred.items():
+            self.key_downloads[server_name] = deferred
+            deferred.addBoth(rm, server_name)

-    def get_server_verify_keys(self, verify_requests):
-        """Takes a dict of KeyGroups and tries to find at least one key for
-        each group.
+    def _get_server_verify_keys(self, verify_requests):
+        """Tries to find at least one key for each verify request
+
+        For each verify_request, verify_request.deferred is called back with
+        params (server_name, key_id, VerifyKey) if a key is found, or errbacked
+        with a SynapseError if none of the keys are found.
+
+        Args:
+            verify_requests (list[VerifyKeyRequest]): list of verify requests
         """

         # These are functions that produce keys given a list of key ids
@@ -245,8 +253,11 @@ class Keyring(object):
         @defer.inlineCallbacks
         def do_iterations():
             with Measure(self.clock, "get_server_verify_keys"):
+                # dict[str, dict[str, VerifyKey]]: results so far.
+                # map server_name -> key_id -> VerifyKey
                 merged_results = {}

+                # dict[str, set(str)]: keys to fetch for each server
                 missing_keys = {}
                 for verify_request in verify_requests:
                     missing_keys.setdefault(verify_request.server_name, set()).update(
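merged_results and missing_keys drive an iterative strategy: each key source is consulted in turn, and only still-missing keys are requested from the next one. Reduced to a synchronous sketch (fetchers stands in for the store/perspectives/direct fetch functions; none of these names are Synapse's):

    def find_keys(fetchers, wanted):
        # wanted: dict server_name -> set(key_id)
        found = {}    # server_name -> key_id -> key
        missing = {srv: set(key_ids) for srv, key_ids in wanted.items()}
        for fetch in fetchers:
            results = fetch(missing)  # dict[server_name][key_id] -> key
            for srv, keys in results.items():
                found.setdefault(srv, {}).update(keys)
                missing[srv] -= set(keys)
            missing = {srv: ks for srv, ks in missing.items() if ks}
            if not missing:
                break
        return found, missing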
@@ -290,7 +301,8 @@ class Keyring(object):
                 if not missing_keys:
                     break

-            for verify_request in requests_missing_keys.values():
+            with PreserveLoggingContext():
+                for verify_request in requests_missing_keys:
                     verify_request.deferred.errback(SynapseError(
                         401,
                         "No key for %s with id %s" % (
@@ -300,23 +312,35 @@ class Keyring(object):
                     ))

         def on_err(err):
+            with PreserveLoggingContext():
                 for verify_request in verify_requests:
                     if not verify_request.deferred.called:
                         verify_request.deferred.errback(err)

-        do_iterations().addErrback(on_err)
+        run_in_background(do_iterations).addErrback(on_err)

     @defer.inlineCallbacks
     def get_keys_from_store(self, server_name_and_key_ids):
-        res = yield preserve_context_over_deferred(defer.gatherResults(
+        """
+
+        Args:
+            server_name_and_key_ids (list[(str, iterable[str])]):
+                list of (server_name, iterable[key_id]) tuples to fetch keys for
+
+        Returns:
+            Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
+                server_name -> key_id -> VerifyKey
+        """
+        res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                preserve_fn(self.store.get_server_verify_keys)(
-                    server_name, key_ids
+                run_in_background(
+                    self.store.get_server_verify_keys,
+                    server_name, key_ids,
                 ).addCallback(lambda ks, server: (server, ks), server_name)
                 for server_name, key_ids in server_name_and_key_ids
             ],
             consumeErrors=True,
-        )).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

         defer.returnValue(dict(res))
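This gatherResults arrangement recurs through the rest of the file: run_in_background starts each fetch without losing the caller's logcontext, and make_deferred_yieldable makes the combined deferred safe to yield on. Stripped of Synapse's logcontext machinery, the plain-Twisted shape is roughly:

    from twisted.internet import defer

    def fetch_all(fetch, server_names):
        # Start one fetch per server, then wait for the whole batch.
        d = defer.gatherResults(
            [defer.maybeDeferred(fetch, name) for name in server_names],
            consumeErrors=True,  # failures arrive wrapped in defer.FirstError
        )

        def unwrap(f):
            # Equivalent of unwrapFirstError: surface the underlying failure.
            f.trap(defer.FirstError)
            return f.value.subFailure

        return d.addErrback(unwrap)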
@@ -333,17 +357,17 @@ class Keyring(object):
                 logger.exception(
                     "Unable to get key from %r: %s %s",
                     perspective_name,
-                    type(e).__name__, str(e.message),
+                    type(e).__name__, str(e),
                 )
                 defer.returnValue({})

-        results = yield preserve_context_over_deferred(defer.gatherResults(
+        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                preserve_fn(get_key)(p_name, p_keys)
+                run_in_background(get_key, p_name, p_keys)
                 for p_name, p_keys in self.perspective_servers.items()
             ],
             consumeErrors=True,
-        )).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

         union_of_keys = {}
         for result in results:
@@ -356,12 +380,6 @@ class Keyring(object):
     def get_keys_from_server(self, server_name_and_key_ids):
         @defer.inlineCallbacks
         def get_key(server_name, key_ids):
-            limiter = yield get_retry_limiter(
-                server_name,
-                self.clock,
-                self.store,
-            )
-            with limiter:
             keys = None
             try:
                 keys = yield self.get_server_verify_key_v2_direct(
@@ -371,7 +389,7 @@ class Keyring(object):
                 logger.info(
                     "Unable to get key %r for %r directly: %s %s",
                     key_ids, server_name,
-                    type(e).__name__, str(e.message),
+                    type(e).__name__, str(e),
                 )

             if not keys:
@@ -383,13 +401,13 @@ class Keyring(object):

             defer.returnValue(keys)

-        results = yield preserve_context_over_deferred(defer.gatherResults(
+        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                preserve_fn(get_key)(server_name, key_ids)
+                run_in_background(get_key, server_name, key_ids)
                 for server_name, key_ids in server_name_and_key_ids
             ],
             consumeErrors=True,
-        )).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

         merged = {}
         for result in results:
@@ -466,9 +484,10 @@ class Keyring(object):
                 for server_name, response_keys in processed_response.items():
                     keys.setdefault(server_name, {}).update(response_keys)

-        yield preserve_context_over_deferred(defer.gatherResults(
+        yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                preserve_fn(self.store_keys)(
+                run_in_background(
+                    self.store_keys,
                     server_name=server_name,
                     from_server=perspective_name,
                     verify_keys=response_keys,
@@ -476,7 +495,7 @@ class Keyring(object):
                 for server_name, response_keys in keys.items()
             ],
             consumeErrors=True
-        )).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

         defer.returnValue(keys)
@@ -524,9 +543,10 @@ class Keyring(object):

             keys.update(response_keys)

-        yield preserve_context_over_deferred(defer.gatherResults(
+        yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                preserve_fn(self.store_keys)(
+                run_in_background(
+                    self.store_keys,
                     server_name=key_server_name,
                     from_server=server_name,
                     verify_keys=verify_keys,
@@ -534,7 +554,7 @@ class Keyring(object):
                 for key_server_name, verify_keys in keys.items()
             ],
             consumeErrors=True
-        )).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

         defer.returnValue(keys)
@@ -600,9 +620,10 @@ class Keyring(object):
             response_keys.update(verify_keys)
             response_keys.update(old_verify_keys)

-            yield preserve_context_over_deferred(defer.gatherResults(
+            yield logcontext.make_deferred_yieldable(defer.gatherResults(
                 [
-                    preserve_fn(self.store.store_server_keys_json)(
+                    run_in_background(
+                        self.store.store_server_keys_json,
                         server_name=server_name,
                         key_id=key_id,
                         from_server=server_name,
@@ -613,7 +634,7 @@ class Keyring(object):
                     for key_id in updated_key_ids
                 ],
                 consumeErrors=True,
-            )).addErrback(unwrapFirstError)
+            ).addErrback(unwrapFirstError))

             results[server_name] = response_keys
@@ -691,7 +712,6 @@ class Keyring(object):

         defer.returnValue(verify_keys)

-    @defer.inlineCallbacks
     def store_keys(self, server_name, from_server, verify_keys):
         """Store a collection of verify keys for a given server
         Args:
@@ -702,12 +722,57 @@ class Keyring(object):
             A deferred that completes when the keys are stored.
         """
         # TODO(markjh): Store whether the keys have expired.
-        yield preserve_context_over_deferred(defer.gatherResults(
+        return logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                preserve_fn(self.store.store_server_verify_key)(
+                run_in_background(
+                    self.store.store_server_verify_key,
                     server_name, server_name, key.time_added, key
                 )
                 for key_id, key in verify_keys.items()
             ],
             consumeErrors=True,
-        )).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))
+
+
+@defer.inlineCallbacks
+def _handle_key_deferred(verify_request):
+    server_name = verify_request.server_name
+    try:
+        with PreserveLoggingContext():
+            _, key_id, verify_key = yield verify_request.deferred
+    except IOError as e:
+        logger.warn(
+            "Got IOError when downloading keys for %s: %s %s",
+            server_name, type(e).__name__, str(e),
+        )
+        raise SynapseError(
+            502,
+            "Error downloading keys for %s" % (server_name,),
+            Codes.UNAUTHORIZED,
+        )
+    except Exception as e:
+        logger.exception(
+            "Got Exception when downloading keys for %s: %s %s",
+            server_name, type(e).__name__, str(e),
+        )
+        raise SynapseError(
+            401,
+            "No key for %s with id %s" % (server_name, verify_request.key_ids),
+            Codes.UNAUTHORIZED,
+        )
+
+    json_object = verify_request.json_object
+
+    logger.debug("Got key %s %s:%s for server %s, verifying" % (
+        key_id, verify_key.alg, verify_key.version, server_name,
+    ))
+    try:
+        verify_signed_json(json_object, server_name, verify_key)
+    except Exception:
+        raise SynapseError(
+            401,
+            "Invalid signature for server %s with key %s:%s" % (
+                server_name, verify_key.alg, verify_key.version
+            ),
+            Codes.UNAUTHORIZED,
+        )
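_handle_key_deferred leans on the signedjson library for the actual check. A minimal round trip with that library's real API (the server name and key version below are made up):

    from signedjson.key import generate_signing_key, get_verify_key
    from signedjson.sign import sign_json, verify_signed_json

    signing_key = generate_signing_key("key1")  # ed25519 key, version "key1"
    signed = sign_json({"foo": "bar"}, "example.com", signing_key)

    # Raises SignatureVerifyException if the signature does not match:
    verify_signed_json(signed, "example.com", get_verify_key(signing_key))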
678
synapse/event_auth.py
Normal file
@@ -0,0 +1,678 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 - 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from canonicaljson import encode_canonical_json
+from signedjson.key import decode_verify_key_bytes
+from signedjson.sign import verify_signed_json, SignatureVerifyException
+from unpaddedbase64 import decode_base64
+
+from synapse.api.constants import EventTypes, Membership, JoinRules
+from synapse.api.errors import AuthError, SynapseError, EventSizeError
+from synapse.types import UserID, get_domain_from_id
+
+logger = logging.getLogger(__name__)
+
+
+def check(event, auth_events, do_sig_check=True, do_size_check=True):
+    """ Checks if this event is correctly authed.
+
+    Args:
+        event: the event being checked.
+        auth_events (dict: event-key -> event): the existing room state.
+
+    Returns:
+        True if the auth checks pass.
+    """
+    if do_size_check:
+        _check_size_limits(event)
+
+    if not hasattr(event, "room_id"):
+        raise AuthError(500, "Event has no room_id: %s" % event)
+
+    if do_sig_check:
+        sender_domain = get_domain_from_id(event.sender)
+        event_id_domain = get_domain_from_id(event.event_id)
+
+        is_invite_via_3pid = (
+            event.type == EventTypes.Member
+            and event.membership == Membership.INVITE
+            and "third_party_invite" in event.content
+        )
+
+        # Check the sender's domain has signed the event
+        if not event.signatures.get(sender_domain):
+            # We allow invites via 3pid to have a sender from a different
+            # HS, as the sender must match the sender of the original
+            # 3pid invite. This is checked further down with the
+            # other dedicated membership checks.
+            if not is_invite_via_3pid:
+                raise AuthError(403, "Event not signed by sender's server")
+
+        # Check the event_id's domain has signed the event
+        if not event.signatures.get(event_id_domain):
+            raise AuthError(403, "Event not signed by sending server")
+
+    if auth_events is None:
+        # Oh, we don't know what the state of the room was, so we
+        # are trusting that this is allowed (at least for now)
+        logger.warn("Trusting event: %s", event.event_id)
+        return True
+
+    if event.type == EventTypes.Create:
+        room_id_domain = get_domain_from_id(event.room_id)
+        if room_id_domain != sender_domain:
+            raise AuthError(
+                403,
+                "Creation event's room_id domain does not match sender's"
+            )
+        # FIXME
+        return True
+
+    creation_event = auth_events.get((EventTypes.Create, ""), None)
+
+    if not creation_event:
+        raise SynapseError(
+            403,
+            "Room %r does not exist" % (event.room_id,)
+        )
+
+    creating_domain = get_domain_from_id(event.room_id)
+    originating_domain = get_domain_from_id(event.sender)
+    if creating_domain != originating_domain:
+        if not _can_federate(event, auth_events):
+            raise AuthError(
+                403,
+                "This room has been marked as unfederatable."
+            )
+
+    # FIXME: Temp hack
+    if event.type == EventTypes.Aliases:
+        if not event.is_state():
+            raise AuthError(
+                403,
+                "Alias event must be a state event",
+            )
+        if not event.state_key:
+            raise AuthError(
+                403,
+                "Alias event must have non-empty state_key"
+            )
+        sender_domain = get_domain_from_id(event.sender)
+        if event.state_key != sender_domain:
+            raise AuthError(
+                403,
+                "Alias event's state_key does not match sender's domain"
+            )
+        return True
+
+    if logger.isEnabledFor(logging.DEBUG):
+        logger.debug(
+            "Auth events: %s",
+            [a.event_id for a in auth_events.values()]
+        )
+
+    if event.type == EventTypes.Member:
+        allowed = _is_membership_change_allowed(
+            event, auth_events
+        )
+        if allowed:
+            logger.debug("Allowing! %s", event)
+        else:
+            logger.debug("Denying! %s", event)
+        return allowed
+
+    _check_event_sender_in_room(event, auth_events)
+
+    # Special case to allow m.room.third_party_invite events wherever
+    # a user is allowed to issue invites. Fixes
+    # https://github.com/vector-im/vector-web/issues/1208 hopefully
+    if event.type == EventTypes.ThirdPartyInvite:
+        user_level = get_user_power_level(event.user_id, auth_events)
+        invite_level = _get_named_level(auth_events, "invite", 0)
+
+        if user_level < invite_level:
+            raise AuthError(
+                403, (
+                    "You cannot issue a third party invite for %s." %
+                    (event.content.display_name,)
+                )
+            )
+        else:
+            return True
+
+    _can_send_event(event, auth_events)
+
+    if event.type == EventTypes.PowerLevels:
+        _check_power_levels(event, auth_events)
+
+    if event.type == EventTypes.Redaction:
+        check_redaction(event, auth_events)
+
+    logger.debug("Allowing! %s", event)
+
+
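Usage sketch (not part of the diff): as the .get() calls above show, auth_events is keyed by (event_type, state_key) tuples. Here create_event, power_levels_event and sender_member_event stand for events already loaded from the room's current state:

    auth_events = {
        (EventTypes.Create, ""): create_event,
        (EventTypes.PowerLevels, ""): power_levels_event,
        (EventTypes.Member, event.sender): sender_member_event,
    }
    check(event, auth_events, do_sig_check=False)  # raises AuthError on denial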
+def _check_size_limits(event):
+    def too_big(field):
+        raise EventSizeError("%s too large" % (field,))
+
+    if len(event.user_id) > 255:
+        too_big("user_id")
+    if len(event.room_id) > 255:
+        too_big("room_id")
+    if event.is_state() and len(event.state_key) > 255:
+        too_big("state_key")
+    if len(event.type) > 255:
+        too_big("type")
+    if len(event.event_id) > 255:
+        too_big("event_id")
+    if len(encode_canonical_json(event.get_pdu_json())) > 65536:
+        too_big("event")
+
+
+def _can_federate(event, auth_events):
+    creation_event = auth_events.get((EventTypes.Create, ""))
+
+    return creation_event.content.get("m.federate", True) is True
+
+
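The 65536-byte limit in _check_size_limits is measured over the canonical JSON encoding, which is deterministic, so the check is stable across servers. For example:

    from canonicaljson import encode_canonical_json

    # Key order and whitespace are normalised before measuring:
    assert encode_canonical_json({"b": 2, "a": 1}) == b'{"a":1,"b":2}'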
+def _is_membership_change_allowed(event, auth_events):
+    membership = event.content["membership"]
+
+    # Check if this is the room creator joining:
+    if len(event.prev_events) == 1 and Membership.JOIN == membership:
+        # Get room creation event:
+        key = (EventTypes.Create, "", )
+        create = auth_events.get(key)
+        if create and event.prev_events[0][0] == create.event_id:
+            if create.content["creator"] == event.state_key:
+                return True
+
+    target_user_id = event.state_key
+
+    creating_domain = get_domain_from_id(event.room_id)
+    target_domain = get_domain_from_id(target_user_id)
+    if creating_domain != target_domain:
+        if not _can_federate(event, auth_events):
+            raise AuthError(
+                403,
+                "This room has been marked as unfederatable."
+            )
+
+    # get info about the caller
+    key = (EventTypes.Member, event.user_id, )
+    caller = auth_events.get(key)
+
+    caller_in_room = caller and caller.membership == Membership.JOIN
+    caller_invited = caller and caller.membership == Membership.INVITE
+
+    # get info about the target
+    key = (EventTypes.Member, target_user_id, )
+    target = auth_events.get(key)
+
+    target_in_room = target and target.membership == Membership.JOIN
+    target_banned = target and target.membership == Membership.BAN
+
+    key = (EventTypes.JoinRules, "", )
+    join_rule_event = auth_events.get(key)
+    if join_rule_event:
+        join_rule = join_rule_event.content.get(
+            "join_rule", JoinRules.INVITE
+        )
+    else:
+        join_rule = JoinRules.INVITE
+
+    user_level = get_user_power_level(event.user_id, auth_events)
+    target_level = get_user_power_level(
+        target_user_id, auth_events
+    )
+
+    # FIXME (erikj): What should we do here as the default?
+    ban_level = _get_named_level(auth_events, "ban", 50)
+
+    logger.debug(
+        "_is_membership_change_allowed: %s",
+        {
+            "caller_in_room": caller_in_room,
+            "caller_invited": caller_invited,
+            "target_banned": target_banned,
+            "target_in_room": target_in_room,
+            "membership": membership,
+            "join_rule": join_rule,
+            "target_user_id": target_user_id,
+            "event.user_id": event.user_id,
+        }
+    )
+
+    if Membership.INVITE == membership and "third_party_invite" in event.content:
+        if not _verify_third_party_invite(event, auth_events):
+            raise AuthError(403, "You are not invited to this room.")
+        if target_banned:
+            raise AuthError(
+                403, "%s is banned from the room" % (target_user_id,)
+            )
+        return True
+
+    if Membership.JOIN != membership:
+        if (caller_invited
+                and Membership.LEAVE == membership
+                and target_user_id == event.user_id):
+            return True
+
+        if not caller_in_room:  # caller isn't joined
+            raise AuthError(
+                403,
+                "%s not in room %s." % (event.user_id, event.room_id,)
+            )
+
+    if Membership.INVITE == membership:
+        # TODO (erikj): We should probably handle this more intelligently
+        # PRIVATE join rules.
+
+        # Invites are valid iff caller is in the room and target isn't.
+        if target_banned:
+            raise AuthError(
+                403, "%s is banned from the room" % (target_user_id,)
+            )
+        elif target_in_room:  # the target is already in the room.
+            raise AuthError(403, "%s is already in the room." %
+                                 target_user_id)
+        else:
+            invite_level = _get_named_level(auth_events, "invite", 0)
+
+            if user_level < invite_level:
+                raise AuthError(
+                    403, "You cannot invite user %s." % target_user_id
+                )
+    elif Membership.JOIN == membership:
+        # Joins are valid iff caller == target and they were:
+        # invited: They are accepting the invitation
+        # joined: It's a NOOP
+        if event.user_id != target_user_id:
+            raise AuthError(403, "Cannot force another user to join.")
+        elif target_banned:
+            raise AuthError(403, "You are banned from this room")
+        elif join_rule == JoinRules.PUBLIC:
+            pass
+        elif join_rule == JoinRules.INVITE:
+            if not caller_in_room and not caller_invited:
+                raise AuthError(403, "You are not invited to this room.")
+        else:
+            # TODO (erikj): may_join list
+            # TODO (erikj): private rooms
+            raise AuthError(403, "You are not allowed to join this room")
+    elif Membership.LEAVE == membership:
+        # TODO (erikj): Implement kicks.
+        if target_banned and user_level < ban_level:
+            raise AuthError(
+                403, "You cannot unban user %s." % (target_user_id,)
+            )
+        elif target_user_id != event.user_id:
+            kick_level = _get_named_level(auth_events, "kick", 50)
+
+            if user_level < kick_level or user_level <= target_level:
+                raise AuthError(
+                    403, "You cannot kick user %s." % target_user_id
+                )
+    elif Membership.BAN == membership:
+        if user_level < ban_level or user_level <= target_level:
+            raise AuthError(403, "You don't have permission to ban")
+    else:
+        raise AuthError(500, "Unknown membership %s" % membership)
+
+    return True
+
+
+def _check_event_sender_in_room(event, auth_events):
+    key = (EventTypes.Member, event.user_id, )
+    member_event = auth_events.get(key)
+
+    return _check_joined_room(
+        member_event,
+        event.user_id,
+        event.room_id
+    )
+
+
+def _check_joined_room(member, user_id, room_id):
+    if not member or member.membership != Membership.JOIN:
+        raise AuthError(403, "User %s not in room %s (%s)" % (
+            user_id, room_id, repr(member)
+        ))
+
+
+def get_send_level(etype, state_key, auth_events):
+    key = (EventTypes.PowerLevels, "", )
+    send_level_event = auth_events.get(key)
+    send_level = None
+    if send_level_event:
+        send_level = send_level_event.content.get("events", {}).get(
+            etype
+        )
+        if send_level is None:
+            if state_key is not None:
+                send_level = send_level_event.content.get(
+                    "state_default", 50
+                )
+            else:
+                send_level = send_level_event.content.get(
+                    "events_default", 0
+                )
+
+    if send_level:
+        send_level = int(send_level)
+    else:
+        send_level = 0
+
+    return send_level
+
+
+def _can_send_event(event, auth_events):
+    send_level = get_send_level(
+        event.type, event.get("state_key", None), auth_events
+    )
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    if user_level < send_level:
+        raise AuthError(
+            403,
+            "You don't have permission to post that to the room. " +
+            "user_level (%d) < send_level (%d)" % (user_level, send_level)
+        )
+
+    # Check state_key
+    if hasattr(event, "state_key"):
+        if event.state_key.startswith("@"):
+            if event.state_key != event.user_id:
+                raise AuthError(
+                    403,
+                    "You are not allowed to set others state"
+                )
+
+    return True
+
+
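A worked example of get_send_level's fallback order. The stub event class and the level values below are illustrative; note that ("m.room.power_levels", "") is the same tuple as (EventTypes.PowerLevels, ""):

    class FakePowerLevels(object):
        """Minimal stand-in for an m.room.power_levels event (not Synapse's)."""
        def __init__(self, content):
            self.content = content

    auth_events = {
        ("m.room.power_levels", ""): FakePowerLevels({
            "events": {"m.room.name": 50},
            "state_default": 50,
            "events_default": 0,
        }),
    }

    assert get_send_level("m.room.name", "", auth_events) == 50      # explicit entry
    assert get_send_level("m.room.topic", "", auth_events) == 50     # state_default
    assert get_send_level("m.room.message", None, auth_events) == 0  # events_default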
+def check_redaction(event, auth_events):
+    """Check whether the event sender is allowed to redact the target event.
+
+    Returns:
+        True if the sender is allowed to redact the target event if the
+        target event was created by them.
+        False if the sender is allowed to redact the target event with no
+        further checks.
+
+    Raises:
+        AuthError if the event sender is definitely not allowed to redact
+        the target event.
+    """
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    redact_level = _get_named_level(auth_events, "redact", 50)
+
+    if user_level >= redact_level:
+        return False
+
+    redacter_domain = get_domain_from_id(event.event_id)
+    redactee_domain = get_domain_from_id(event.redacts)
+    if redacter_domain == redactee_domain:
+        return True
+
+    raise AuthError(
+        403,
+        "You don't have permission to redact events"
+    )
+
+
+def _check_power_levels(event, auth_events):
+    user_list = event.content.get("users", {})
+    # Validate users
+    for k, v in user_list.items():
+        try:
+            UserID.from_string(k)
+        except Exception:
+            raise SynapseError(400, "Not a valid user_id: %s" % (k,))
+
+        try:
+            int(v)
+        except Exception:
+            raise SynapseError(400, "Not a valid power level: %s" % (v,))
+
+    key = (event.type, event.state_key, )
+    current_state = auth_events.get(key)
+
+    if not current_state:
+        return
+
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    # Check other levels:
+    levels_to_check = [
+        ("users_default", None),
+        ("events_default", None),
+        ("state_default", None),
+        ("ban", None),
+        ("redact", None),
+        ("kick", None),
+        ("invite", None),
+    ]
+
+    old_list = current_state.content.get("users", {})
+    for user in set(old_list.keys() + user_list.keys()):
+        levels_to_check.append(
+            (user, "users")
+        )
+
+    old_list = current_state.content.get("events", {})
+    new_list = event.content.get("events", {})
+    for ev_id in set(old_list.keys() + new_list.keys()):
+        levels_to_check.append(
+            (ev_id, "events")
+        )
+
+    old_state = current_state.content
+    new_state = event.content
+
+    for level_to_check, dir in levels_to_check:
+        old_loc = old_state
+        new_loc = new_state
+        if dir:
+            old_loc = old_loc.get(dir, {})
+            new_loc = new_loc.get(dir, {})
+
+        if level_to_check in old_loc:
+            old_level = int(old_loc[level_to_check])
+        else:
+            old_level = None
+
+        if level_to_check in new_loc:
+            new_level = int(new_loc[level_to_check])
+        else:
+            new_level = None
+
+        if new_level is not None and old_level is not None:
+            if new_level == old_level:
+                continue
+
+        if dir == "users" and level_to_check != event.user_id:
+            if old_level == user_level:
+                raise AuthError(
+                    403,
+                    "You don't have permission to remove ops level equal "
+                    "to your own"
+                )
+
+        if old_level > user_level or new_level > user_level:
+            raise AuthError(
+                403,
+                "You don't have permission to add ops level greater "
+                "than your own"
+            )
+
+
+def _get_power_level_event(auth_events):
+    key = (EventTypes.PowerLevels, "", )
+    return auth_events.get(key)
+
+
+def get_user_power_level(user_id, auth_events):
+    power_level_event = _get_power_level_event(auth_events)
+
+    if power_level_event:
+        level = power_level_event.content.get("users", {}).get(user_id)
+        if not level:
+            level = power_level_event.content.get("users_default", 0)
+
+        if level is None:
+            return 0
+        else:
+            return int(level)
+    else:
+        key = (EventTypes.Create, "", )
+        create_event = auth_events.get(key)
+        if (create_event is not None and
+                create_event.content["creator"] == user_id):
+            return 100
+        else:
+            return 0
+
+
+def _get_named_level(auth_events, name, default):
+    power_level_event = _get_power_level_event(auth_events)
+
+    if not power_level_event:
+        return default
+
+    level = power_level_event.content.get(name, None)
+    if level is not None:
+        return int(level)
+    else:
+        return default
+
+
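Worked defaults for the helpers above, in a room that has no m.room.power_levels event (the stub class and user IDs are illustrative): the room creator gets level 100, everyone else 0, and named levels fall back to their defaults.

    class FakeEvent(object):
        """Minimal stand-in for a state event (not Synapse's class)."""
        def __init__(self, content):
            self.content = content

    auth_events = {("m.room.create", ""): FakeEvent({"creator": "@alice:hs"})}

    assert get_user_power_level("@alice:hs", auth_events) == 100  # room creator
    assert get_user_power_level("@bob:hs", auth_events) == 0      # everyone else
    assert _get_named_level(auth_events, "ban", 50) == 50         # default used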
+def _verify_third_party_invite(event, auth_events):
+    """
+    Validates that the invite event is authorized by a previous third-party invite.
+
+    Checks that the public key, and keyserver, match those in the third party invite,
+    and that the invite event has a signature issued using that public key.
+
+    Args:
+        event: The m.room.member join event being validated.
+        auth_events: All relevant previous context events which may be used
+            for authorization decisions.
+
+    Return:
+        True if the event fulfills the expectations of a previous third party
+        invite event.
+    """
+    if "third_party_invite" not in event.content:
+        return False
+    if "signed" not in event.content["third_party_invite"]:
+        return False
+    signed = event.content["third_party_invite"]["signed"]
+    for key in {"mxid", "token"}:
+        if key not in signed:
+            return False
+
+    token = signed["token"]
+
+    invite_event = auth_events.get(
+        (EventTypes.ThirdPartyInvite, token,)
+    )
+    if not invite_event:
+        return False
+
+    if invite_event.sender != event.sender:
+        return False
+
+    if event.user_id != invite_event.user_id:
+        return False
+
+    if signed["mxid"] != event.state_key:
+        return False
+    if signed["token"] != token:
+        return False
+
+    for public_key_object in get_public_keys(invite_event):
+        public_key = public_key_object["public_key"]
+        try:
+            for server, signature_block in signed["signatures"].items():
+                for key_name, encoded_signature in signature_block.items():
+                    if not key_name.startswith("ed25519:"):
+                        continue
+                    verify_key = decode_verify_key_bytes(
+                        key_name,
+                        decode_base64(public_key)
+                    )
+                    verify_signed_json(signed, server, verify_key)
+
+                    # We got the public key from the invite, so we know that the
+                    # correct server signed the signed bundle.
+                    # The caller is responsible for checking that the signing
+                    # server has not revoked that public key.
+                    return True
+        except (KeyError, SignatureVerifyException,):
+            continue
+    return False
+
+
+def get_public_keys(invite_event):
+    public_keys = []
+    if "public_key" in invite_event.content:
+        o = {
+            "public_key": invite_event.content["public_key"],
+        }
+        if "key_validity_url" in invite_event.content:
+            o["key_validity_url"] = invite_event.content["key_validity_url"]
+        public_keys.append(o)
+    public_keys.extend(invite_event.content.get("public_keys", []))
+    return public_keys
+
+
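The "signed" block being validated has roughly this shape (every value below is illustrative): _verify_third_party_invite checks the mxid and token fields, then tries each public key from the original invite against each ed25519 signature.

    signed = {
        "mxid": "@invitee:example.com",  # must equal the join event's state_key
        "token": "sometoken",            # must match an m.room.third_party_invite
        "signatures": {
            "identity.example.com": {
                "ed25519:0": "<unpadded base64 signature>",
            },
        },
    }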
+def auth_types_for_event(event):
+    """Given an event, return a list of (EventType, StateKey) that may be
+    needed to auth the event. The returned list may be a superset of what
+    would actually be required depending on the full state of the room.
+
+    Used to limit the number of events to fetch from the database to
+    actually auth the event.
+    """
+    if event.type == EventTypes.Create:
+        return []
+
+    auth_types = []
+
+    auth_types.append((EventTypes.PowerLevels, "", ))
+    auth_types.append((EventTypes.Member, event.user_id, ))
+    auth_types.append((EventTypes.Create, "", ))
+
+    if event.type == EventTypes.Member:
+        membership = event.content["membership"]
+        if membership in [Membership.JOIN, Membership.INVITE]:
+            auth_types.append((EventTypes.JoinRules, "", ))
+
+        auth_types.append((EventTypes.Member, event.state_key, ))
+
+        if membership == Membership.INVITE:
+            if "third_party_invite" in event.content:
+                key = (
+                    EventTypes.ThirdPartyInvite,
+                    event.content["third_party_invite"]["signed"]["token"]
+                )
+                auth_types.append(key)
+
+    return auth_types
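For a non-state message event the result is just the three baseline keys; membership events add join rules and the target's membership on top. A quick check (the stub class and sender ID are made up):

    class FakeMessage(object):
        """Minimal stand-in for an m.room.message event (illustrative)."""
        type = "m.room.message"
        user_id = "@alice:hs"

    assert auth_types_for_event(FakeMessage()) == [
        ("m.room.power_levels", ""),
        ("m.room.member", "@alice:hs"),
        ("m.room.create", ""),
    ]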
@@ -36,16 +36,37 @@ class _EventInternalMetadata(object):
     def is_invite_from_remote(self):
         return getattr(self, "invite_from_remote", False)

+    def get_send_on_behalf_of(self):
+        """Whether this server should send the event on behalf of another server.
+        This is used by the federation "send_join" API to forward the initial join
+        event for a server in the room.
+
+        returns a str with the name of the server this event is sent on behalf of.
+        """
+        return getattr(self, "send_on_behalf_of", None)
+

 def _event_dict_property(key):
+    # We want to be able to use hasattr with the event dict properties.
+    # However, (on python3) hasattr expects AttributeError to be raised. Hence,
+    # we need to transform the KeyError into an AttributeError
     def getter(self):
+        try:
             return self._event_dict[key]
+        except KeyError:
+            raise AttributeError(key)

     def setter(self, v):
+        try:
             self._event_dict[key] = v
+        except KeyError:
+            raise AttributeError(key)

     def delete(self):
+        try:
             del self._event_dict[key]
+        except KeyError:
+            raise AttributeError(key)

     return property(
         getter,
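Why the KeyError-to-AttributeError translation above matters: hasattr() on python3 only treats AttributeError as "attribute missing" and lets any other exception escape. A sketch (the stub class is illustrative):

    class Thing(object):
        foo = _event_dict_property("foo")

        def __init__(self, d):
            self._event_dict = d

    assert hasattr(Thing({"foo": 1}), "foo")
    assert not hasattr(Thing({}), "foo")  # a bare KeyError would leak out on py3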
@@ -70,7 +91,6 @@ class EventBase(object):
     auth_events = _event_dict_property("auth_events")
     depth = _event_dict_property("depth")
     content = _event_dict_property("content")
-    event_id = _event_dict_property("event_id")
     hashes = _event_dict_property("hashes")
     origin = _event_dict_property("origin")
     origin_server_ts = _event_dict_property("origin_server_ts")
@@ -79,8 +99,6 @@ class EventBase(object):
     redacts = _event_dict_property("redacts")
     room_id = _event_dict_property("room_id")
     sender = _event_dict_property("sender")
-    state_key = _event_dict_property("state_key")
-    type = _event_dict_property("type")
     user_id = _event_dict_property("sender")

     @property
@@ -153,6 +171,11 @@ class FrozenEvent(EventBase):
         else:
             frozen_dict = event_dict

+        self.event_id = event_dict["event_id"]
+        self.type = event_dict["type"]
+        if "state_key" in event_dict:
+            self.state_key = event_dict["state_key"]
+
         super(FrozenEvent, self).__init__(
             frozen_dict,
             signatures=signatures,
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from . import EventBase, FrozenEvent
+from . import EventBase, FrozenEvent, _event_dict_property

 from synapse.types import EventID
@@ -34,6 +34,10 @@ class EventBuilder(EventBase):
             internal_metadata_dict=internal_metadata_dict,
         )

+    event_id = _event_dict_property("event_id")
+    state_key = _event_dict_property("state_key")
+    type = _event_dict_property("type")
+
     def build(self):
         return FrozenEvent.from_event(self)
@@ -51,7 +55,7 @@ class EventBuilderFactory(object):

         local_part = str(int(self.clock.time())) + i + random_string(5)

-        e_id = EventID.create(local_part, self.hostname)
+        e_id = EventID(local_part, self.hostname)

         return e_id.to_string()
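The change above constructs EventID directly rather than via the removed create() helper; to_string() still renders the usual "$localpart:domain" form. Illustrative values:

    # EventID(localpart, domain) stringifies with the "$" sigil, so the
    # builder above yields IDs of the form:
    #
    #     EventID("1459900000AbCdE", "example.com").to_string()
    #     # -> "$1459900000AbCdE:example.com"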
Some files were not shown because too many files have changed in this diff.