Mirror of https://github.com/element-hq/synapse.git
Synced 2025-12-11 01:40:27 +00:00

Compare commits: v0.17.1-rc...erikj/urle (3185 commits)
|
|
85b51fdd6b | ||
|
|
43954d000e | ||
|
|
2a0159b8ae | ||
|
|
cb98ac261b | ||
|
|
31a07d2335 | ||
|
|
91279fd218 | ||
|
|
513188aa56 | ||
|
|
fadb01551a | ||
|
|
d25c20ccbe | ||
|
|
7d893beebe | ||
|
|
94a83b534f | ||
|
|
74cbfdc7de | ||
|
|
d4a35ada28 | ||
|
|
e020834e4f | ||
|
|
2ad72da931 | ||
|
|
8da7d0e4f9 | ||
|
|
3c4208a057 | ||
|
|
f4164edb70 | ||
|
|
2eed4d7af4 | ||
|
|
438ef47637 | ||
|
|
74a3b4a650 | ||
|
|
9b69c85f7c | ||
|
|
d51b8a1674 | ||
|
|
662b031a30 | ||
|
|
4ec67a3d21 | ||
|
|
0595413c0f | ||
|
|
a7032abb2e | ||
|
|
9e6d88f4e2 | ||
|
|
8c93e0bae7 | ||
|
|
70332a12dd | ||
|
|
373654c635 | ||
|
|
485d999c8a | ||
|
|
69054e3d4c | ||
|
|
0237a0d1a5 | ||
|
|
69a2d4e38c | ||
|
|
19275b3030 | ||
|
|
f12993ec16 | ||
|
|
940d4fad24 | ||
|
|
bb36b93f71 | ||
|
|
d87b87adf7 | ||
|
|
caed150363 | ||
|
|
80a6a445fa | ||
|
|
628e65721b | ||
|
|
274c2f50a5 | ||
|
|
a99e933550 | ||
|
|
3847fa38c4 | ||
|
|
f2690c6423 | ||
|
|
81b94c5750 | ||
|
|
65fa37ac5e | ||
|
|
3baf641a48 | ||
|
|
c0238ecbed | ||
|
|
273b6bcf22 | ||
|
|
f7f1027d3d | ||
|
|
34e5e17f91 | ||
|
|
b96c6c3185 | ||
|
|
1ffe9578d1 | ||
|
|
cce957e254 | ||
|
|
bd9b8d87ae | ||
|
|
2aa39db681 | ||
|
|
657847e4c6 | ||
|
|
965168a842 | ||
|
|
2854ee2a52 | ||
|
|
598317927c | ||
|
|
7ed5acacf4 | ||
|
|
c1c38da586 | ||
|
|
051a9ea921 | ||
|
|
265d847ffd | ||
|
|
f4778d4cd9 | ||
|
|
9e25443db8 | ||
|
|
44982606ee | ||
|
|
516a272aca | ||
|
|
0cfd6c3161 | ||
|
|
5405351b14 | ||
|
|
1b91ff685f | ||
|
|
ed7a703d4c | ||
|
|
1671913287 | ||
|
|
f51888530d | ||
|
|
826ca61745 | ||
|
|
fbd2615de4 | ||
|
|
c10cb581c6 | ||
|
|
ef0cc648cf | ||
|
|
761f9fccff | ||
|
|
a662252758 | ||
|
|
1aa3e1d287 | ||
|
|
1bb8ec296d | ||
|
|
d80f64d370 | ||
|
|
998666be64 | ||
|
|
c882783535 | ||
|
|
572acde483 | ||
|
|
5dc2a702cf | ||
|
|
3e784eff74 | ||
|
|
16b652f0a3 | ||
|
|
e82247f990 | ||
|
|
c7f665d700 | ||
|
|
d3f108b6bb | ||
|
|
097330bae8 | ||
|
|
21b977ccfe | ||
|
|
928d337c16 | ||
|
|
bc1a8b1f7a | ||
|
|
b3be9e4376 | ||
|
|
67f0c990f8 | ||
|
|
fba1111dd6 | ||
|
|
c8cd87b21b | ||
|
|
55e17d3697 | ||
|
|
1ee6285905 | ||
|
|
68e1a872fd | ||
|
|
55fc17cf4b | ||
|
|
ffc807af50 | ||
|
|
41788bba50 | ||
|
|
873f870e5a | ||
|
|
5acbe09b67 | ||
|
|
8c1e746f54 | ||
|
|
93b32d4515 | ||
|
|
bed10f9880 | ||
|
|
4bbef62124 | ||
|
|
3cf15edef7 | ||
|
|
a234e895cf | ||
|
|
c943d8d2e8 | ||
|
|
4daa397a00 | ||
|
|
c7cd35d682 | ||
|
|
54cc69154e | ||
|
|
11faa4296d | ||
|
|
f6338d6a3e | ||
|
|
1ccdc1e93a | ||
|
|
25414b44a2 | ||
|
|
3f11953fcb | ||
|
|
50943ab942 | ||
|
|
30961182f2 | ||
|
|
c1a133a6b6 | ||
|
|
778fa85f47 | ||
|
|
1a1e198f72 | ||
|
|
3b8d0ceb22 | ||
|
|
7356d52e73 | ||
|
|
9459137f1e | ||
|
|
1294d4a329 | ||
|
|
ab34fdecb7 | ||
|
|
b162cb2e41 | ||
|
|
0e1900d819 | ||
|
|
641efb6a39 | ||
|
|
e7af8be5ae | ||
|
|
142983b4ea | ||
|
|
721414d98a | ||
|
|
e993925279 | ||
|
|
a3dc1e9cbe | ||
|
|
d9dcb2ba3a | ||
|
|
adf53f04ce | ||
|
|
c435bfee9c | ||
|
|
db7283cc6b | ||
|
|
d0b8d49f71 | ||
|
|
5474824975 | ||
|
|
17f4f14df7 | ||
|
|
cd5b264b03 | ||
|
|
eb6a7cf3f4 | ||
|
|
37638c06c5 | ||
|
|
60a015550a | ||
|
|
90d5983d7a | ||
|
|
d89f8683dc | ||
|
|
c20cb5160d | ||
|
|
fda97dd58a | ||
|
|
8e1ed09dff | ||
|
|
965f33c901 | ||
|
|
9899824b85 | ||
|
|
9219139351 | ||
|
|
63c19e1df9 | ||
|
|
3e86dcf1c0 | ||
|
|
86bcf4d6a7 | ||
|
|
ba07d4a70e | ||
|
|
928b2187ea | ||
|
|
4b31426a02 | ||
|
|
122c7a43c9 | ||
|
|
aad8a1a825 | ||
|
|
3bb3f02517 |
.github/ISSUE_TEMPLATE.md (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
+<!--
+
+**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
+You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
+
+
+This is a bug report template. By following the instructions below and
+filling out the sections with your information, you will help us to get all
+the necessary data to fix your issue.
+
+You can also preview your report before submitting it. You may remove sections
+that aren't relevant to your particular case.
+
+Text between <!-- and --> marks will be invisible in the report.
+
+-->
+
+### Description
+
+Describe here the problem that you are experiencing, or the feature you are requesting.
+
+### Steps to reproduce
+
+- For bugs, list the steps
+- that reproduce the bug
+- using hyphens as bullet points
+
+Describe how what happens differs from what you expected.
+
+If you can identify any relevant log snippets from _homeserver.log_, please include
+those here (please be careful to remove any personal or private data):
+
+### Version information
+
+<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
+
+- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
+
+If not matrix.org:
+- **Version**: What version of Synapse is running? <!--
+You can find the Synapse version by inspecting the server headers (replace matrix.org with
+your own homeserver domain):
+$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
+-->
+- **Install method**: package manager/git clone/pip
+- **Platform**: Tell us about the environment in which your homeserver is operating
+  - distro, hardware, if it's running in a vm/container, etc.
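A note on the ``curl`` line embedded in that template: it prints the homeserver's ``Server`` response header, which carries the Synapse version. On a typical deployment the relevant output looks something like the following (the version number is purely illustrative)::

    $ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
    < Server: Synapse/0.19.3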
.gitignore (vendored, 10 lines changed)
@@ -24,10 +24,10 @@ homeserver*.yaml
 .coverage
 htmlcov

-demo/*.db
-demo/*.log
-demo/*.log.*
-demo/*.pid
+demo/*/*.db
+demo/*/*.log
+demo/*/*.log.*
+demo/*/*.pid
 demo/media_store.*
 demo/etc

@@ -46,3 +46,5 @@ static/client/register/register_config.js

 env/
 *.config

+.vscode/
.travis.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
+sudo: false
+language: python
+python: 2.7
+
+# tell travis to cache ~/.cache/pip
+cache: pip
+
+env:
+ - TOX_ENV=packaging
+ - TOX_ENV=pep8
+ - TOX_ENV=py27
+
+install:
+ - pip install tox
+
+script:
+ - tox -e $TOX_ENV
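The same checks Travis runs can be reproduced locally before pushing a branch. A minimal sketch, assuming ``tox`` is available on your PATH (the environment names come straight from the file above)::

    pip install tox
    tox -e packaging,pep8,py27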
CHANGES.rst (1048 lines changed)
File diff suppressed because it is too large.
CONTRIBUTING.rst
@@ -30,8 +30,12 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.

-We use Jenkins for continuous integration (http://matrix.org/jenkins), and
-typically all pull requests get automatically tested by Jenkins: if your change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org so please lurk there and keep an eye open.
+We use `Jenkins <http://matrix.org/jenkins>`_ and
+`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
+integration. All pull requests to synapse get automatically tested by Travis;
+the Jenkins builds require an administrator to start them. If your change
+breaks the build, this will be shown in github, so please keep an eye on the
+pull request for feedback.

 Code style
 ~~~~~~~~~~
@@ -115,4 +119,4 @@ can't be accepted. Git makes this trivial - just use the -s flag when you do
 Conclusion
 ~~~~~~~~~~

 That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!
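(The ``-s`` flag mentioned in the hunk header above is git's sign-off switch; the commit message here is just an example::

    git commit -s -m "Fix typo in federation docs"

``git commit -s`` appends a ``Signed-off-by:`` line, taken from your configured name and email, to the commit message.)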
MANIFEST.in
@@ -27,4 +27,5 @@ exclude jenkins*.sh
 exclude jenkins*
 recursive-exclude jenkins *.sh

+prune .github
 prune demo/etc
README.rst (711 lines changed)
@@ -11,7 +11,7 @@ VoIP. The basics you need to know to get up and running are:
 like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

 - Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
   you will normally refer to yourself and others using a third party identifier
   (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)

 The overall architecture is::
@@ -20,12 +20,13 @@ The overall architecture is::
       https://somewhere.org/_matrix      https://elsewhere.net/_matrix

 ``#matrix:matrix.org`` is the official support room for Matrix, and can be
-accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
-bridge at irc://irc.freenode.net/matrix.
+accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
+via IRC bridge at irc://irc.freenode.net/matrix.

 Synapse is currently in rapid development, but as of version 0.5 we believe it
 is sufficiently stable to be run as an internet-facing service for real usage!

 About Matrix
 ============
@@ -52,10 +53,10 @@ generation of fully open and interoperable messaging and VoIP apps for the
 internet.

 Synapse is a reference "homeserver" implementation of Matrix from the core
-development team at matrix.org, written in Python/Twisted for clarity and
-simplicity. It is intended to showcase the concept of Matrix and let folks see
-the spec in the context of a codebase and let you run your own homeserver and
-generally help bootstrap the ecosystem.
+development team at matrix.org, written in Python/Twisted. It is intended to
+showcase the concept of Matrix and let folks see the spec in the context of a
+codebase and let you run your own homeserver and generally help bootstrap the
+ecosystem.

 In Matrix, every user runs one or more Matrix clients, which connect through to
 a Matrix homeserver. The homeserver stores all their personal chat history and
@@ -66,26 +67,16 @@ hosted by someone else (e.g. matrix.org) - there is no single point of control
 or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
 etc.

-Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
-web client demo implemented in AngularJS) and cmdclient (a basic Python
-command line utility which lets you easily see what the JSON APIs are up to).
-
-Meanwhile, iOS and Android SDKs and clients are available from:
-
-- https://github.com/matrix-org/matrix-ios-sdk
-- https://github.com/matrix-org/matrix-ios-kit
-- https://github.com/matrix-org/matrix-ios-console
-- https://github.com/matrix-org/matrix-android-sdk
-
 We'd like to invite you to join #matrix:matrix.org (via
-https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
-Matrix spec at https://matrix.org/docs/spec and API docs at
-https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
-report any bugs via https://matrix.org/jira.
+https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
+at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
+`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
+<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.

 Thanks for using Matrix!

-[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
+[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.


 Synapse Installation
 ====================
@@ -93,11 +84,17 @@ Synapse Installation
 Synapse is the reference python/twisted Matrix homeserver implementation.

 System requirements:

 - POSIX-compliant system (tested on Linux & OS X)
 - Python 2.7
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

-Synapse is written in python but some of the libraries is uses are written in
+Installing from source
+----------------------
+(Prebuilt packages are available for some platforms - see `Platform-Specific
+Instructions`_.)
+
+Synapse is written in python but some of the libraries it uses are written in
 C. So before we can install synapse itself we need a working C compiler and the
 header files for python C extensions.
@@ -112,10 +109,10 @@ Installing prerequisites on ArchLinux::
     sudo pacman -S base-devel python2 python-pip \
                    python-setuptools python-virtualenv sqlite3

-Installing prerequisites on CentOS 7::
+Installing prerequisites on CentOS 7 or Fedora 25::

     sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                     lcms2-devel libwebp-devel tcl-devel tk-devel \
+                     lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
                      python-virtualenv libffi-devel openssl-devel
     sudo yum groupinstall "Development Tools"
@@ -124,6 +121,7 @@ Installing prerequisites on Mac OS X::
     xcode-select --install
     sudo easy_install pip
     sudo pip install virtualenv
+    brew install pkg-config libffi

 Installing prerequisites on Raspbian::
@@ -134,10 +132,22 @@ Installing prerequisites on Raspbian::
     sudo pip install --upgrade ndg-httpsclient
     sudo pip install --upgrade virtualenv

+Installing prerequisites on openSUSE::
+
+    sudo zypper in -t pattern devel_basis
+    sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
+                   python-devel libffi-devel libopenssl-devel libjpeg62-devel
+
+Installing prerequisites on OpenBSD::
+
+    doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
+                 libxslt
+
 To install the synapse homeserver run::

     virtualenv -p python2.7 ~/.synapse
     source ~/.synapse/bin/activate
+    pip install --upgrade pip
     pip install --upgrade setuptools
     pip install https://github.com/matrix-org/synapse/tarball/master
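After the install completes, a quick import check inside the virtualenv confirms the package is usable; this is just a sketch, and the version printed will obviously vary::

    $ python -c "import synapse; print synapse.__version__"
    0.19.3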
@@ -145,38 +155,74 @@ This installs synapse, along with the libraries it uses, into a virtual
 environment under ``~/.synapse``. Feel free to pick a different directory
 if you prefer.

-In case of problems, please see the _Troubleshooting section below.
+In case of problems, please see the _`Troubleshooting` section below.

 Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
 above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.

 Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
 tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
 for details.

-To set up your homeserver, run (in your virtualenv, as before)::
+Configuring synapse
+-------------------
+
+Before you can start Synapse, you will need to generate a configuration
+file. To do this, run (in your virtualenv, as before)::

     cd ~/.synapse
     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name my.domain.name \
         --config-path homeserver.yaml \
         --generate-config \
         --report-stats=[yes|no]

-...substituting your host and domain name as appropriate.
+... substituting an appropriate value for ``--server-name``. The server name
+determines the "domain" part of user-ids for users on your server: these will
+all be of the format ``@user:my.domain.name``. It also determines how other
+matrix servers will reach yours for `Federation`_. For a test configuration,
+set this to the hostname of your server. For a more production-ready setup, you
+will probably want to specify your domain (``example.com``) rather than a
+matrix-specific hostname here (in the same way that your email address is
+probably ``user@example.com`` rather than ``user@email.example.com``) - but
+doing so may require more advanced setup - see `Setting up Federation`_.
+Beware that the server name cannot be changed later.

-This will generate you a config file that you can then customise, but it will
+This command will generate you a config file that you can then customise, but it will
 also generate a set of keys for you. These keys will allow your Home Server to
 identify itself to other Home Servers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. If, for whatever reason, you do need to
+wise to back them up somewhere safe. (If, for whatever reason, you do need to
 change your Home Server's keys, you may find that other Home Servers have the
 old key cached. If you update the signing key, you should change the name of the
-key in the <server name>.signing.key file (the second word) to something different.
+key in the ``<server name>.signing.key`` file (the second word) to something
+different. See `the spec`__ for more information on key management.)

-By default, registration of new users is disabled. You can either enable
-registration in the config by specifying ``enable_registration: true``
-(it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
-you can use the command line to register new users::
+.. __: `key_management`_
+
+The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
+configured without TLS; it should be behind a reverse proxy for TLS/SSL
+termination on port 443 which in turn should be used for clients. Port 8448
+is configured to use TLS with a self-signed certificate. If you would like
+to do an initial test with a client without having to set up a reverse proxy,
+you can temporarily use another certificate. (Note that a self-signed
+certificate is fine for `Federation`_.) You can do so by changing
+``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
+in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
+to read `Using a reverse proxy with Synapse`_ when doing so.
+
+Apart from port 8448 using TLS, both ports are the same in the default
+configuration.
+
+Registering a user
+------------------
+
+You will need at least one user on your server in order to use a Matrix
+client. Users can be registered either `via a Matrix client`__, or via a
+commandline script.
+
+.. __: `client-user-reg`_
+
+To get started, it is easiest to use the command line to register new users::

     $ source ~/.synapse/bin/activate
     $ synctl start # if not already running
@@ -184,10 +230,41 @@ you can use the command line to register new users::
     New user localpart: erikj
     Password:
     Confirm password:
+    Make admin [no]:
     Success!

+This process uses a setting ``registration_shared_secret`` in
+``homeserver.yaml``, which is shared between Synapse itself and the
+``register_new_matrix_user`` script. It doesn't matter what it is (a random
+value is generated by ``--generate-config``), but it should be kept secret, as
+anyone with knowledge of it can register users on your server even if
+``enable_registration`` is ``false``.
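For scripted setups, ``register_new_matrix_user`` can also be driven non-interactively; the flags below are a sketch rather than gospel (check ``register_new_matrix_user --help`` on your install), and the localpart and password are placeholders::

    $ register_new_matrix_user -c homeserver.yaml \
          -u erikj -p some-password https://localhost:8448

Adding ``-a`` would make the new user a server admin.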
+Setting up a TURN server
+------------------------
+
 For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server. See docs/turn-howto.rst for details.
+a TURN server. See `<docs/turn-howto.rst>`_ for details.
+
+IPv6
+----
+
+As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
+for providing PR #1696.
+
+However, for federation to work on hosts with IPv6 DNS servers you **must**
+be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
+for details. We can't make Synapse depend on Twisted 17.1 by default
+yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
+so if you are using operating system dependencies you'll have to install your
+own Twisted 17.1 package via pip or backports etc.
+
+If you're running in a virtualenv then pip should have installed the newest
+Twisted automatically, but if your virtualenv is old you will need to manually
+upgrade to a newer Twisted dependency via::
+
+    pip install "Twisted>=17.1.0"

 Running Synapse
 ===============
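To see which Twisted a virtualenv actually ends up with, something like this works (the output is illustrative)::

    $ python -c "import twisted; print twisted.version.short()"
    17.1.0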
@@ -199,29 +276,72 @@ run (e.g. ``~/.synapse``), and::
     source ./bin/activate
     synctl start

-Using PostgreSQL
-================
-
-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
-
-The advantages of Postgres include:
-
-* significant performance improvements due to the superior threading and
-  caching model, smarter query optimiser
-* allowing the DB to be run on separate hardware
-* allowing basic active/backup high-availability with a "hot spare" synapse
-  pointing at the same DB master, as well as enabling DB replication in
-  synapse itself.
-
-The only disadvantage is that the code is relatively new as of April 2015 and
-may have a few regressions relative to SQLite.
-
-For information on how to install and use PostgreSQL, please see
-`docs/postgres.rst <docs/postgres.rst>`_.
+Connecting to Synapse from a client
+===================================
+
+The easiest way to try out your new Synapse installation is by connecting to it
+from a web client. The easiest option is probably the one at
+http://riot.im/app. You will need to specify a "Custom server" when you log on
+or register: set this to ``https://domain.tld`` if you set up a reverse proxy
+following the recommended setup, or ``https://localhost:8448`` - remember to specify the
+port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
+server as the default - see `Identity servers`_.)
+
+If using port 8448 you will run into errors until you accept the self-signed
+certificate. You can easily do this by going to ``https://localhost:8448``
+directly with your browser and accepting the presented certificate. You can then
+go back to your web client and proceed further.
+
+If all goes well you should at least be able to log in, create a room, and
+start sending messages.
+
+(The homeserver runs a web client by default at https://localhost:8448/, though
+as of the time of writing it is somewhat outdated and not really recommended -
+https://github.com/matrix-org/synapse/issues/1527).
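A browser-free way to check that the client API is answering (and to exercise the self-signed certificate directly) is to hit the versions endpoint; ``-k`` tells curl to accept the self-signed certificate, and the response shown is illustrative rather than exact::

    $ curl -k https://localhost:8448/_matrix/client/versions
    {"versions": ["r0.0.1", "r0.1.0", "r0.2.0"]}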
-Platform Specific Instructions
+.. _`client-user-reg`:
+
+Registering a new user from a client
+------------------------------------
+
+By default, registration of new users via Matrix clients is disabled. To enable
+it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
+recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)
+
+Once ``enable_registration`` is set to ``true``, it is possible to register a
+user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
+
+Your new user name will be formed partly from the ``server_name`` (see
+`Configuring synapse`_), and partly from a localpart you specify when you
+create the account. Your name will take the form of::
+
+    @localpart:my.domain.name
+
+(pronounced "at localpart on my dot domain dot name").
+
+As when logging in, you will need to specify a "Custom server". Specify your
+desired ``localpart`` in the 'User name' box.
+
+
+Security Note
+=============
+
+Matrix serves raw user generated data in some APIs - specifically the `content
+repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+
+Whilst we have tried to mitigate against possible XSS attacks (e.g.
+https://github.com/matrix-org/synapse/pull/1021) we recommend running
+matrix homeservers on a dedicated domain name, to limit any malicious user generated
+content served to web browsers via a matrix API from being able to attack webapps hosted
+on the same domain. This is particularly true of sharing a matrix webclient and
+server on the same domain.
+
+See https://github.com/vector-im/vector-web/issues/1977 and
+https://developer.github.com/changes/2014-04-25-user-content-security for more details.
+
+
+Platform-Specific Instructions
 ==============================

 Debian
@@ -229,21 +349,27 @@ Debian
 Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
 Note that these packages do not include a client - choose one from
-https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)
+https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)

 Fedora
 ------

+Synapse is in the Fedora repositories as ``matrix-synapse``::
+
+    sudo dnf install matrix-synapse
+
 Oleg Girko provides Fedora RPMs at
 https://obs.infoserver.lv/project/monitor/matrix-synapse

 ArchLinux
 ---------

-The quickest way to get up and running with ArchLinux is probably with Ivan
-Shapovalov's AUR package from
-https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
-the necessary dependencies.
+The quickest way to get up and running with ArchLinux is probably with the community package
+https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
+the necessary dependencies. If the default web client is to be served (enabled by default in
+the generated config),
+https://www.archlinux.org/packages/community/any/python2-matrix-angular-sdk/ will also need to
+be installed.

 Alternatively, to install using pip a few changes may be needed as ArchLinux
 defaults to python 3, but synapse currently assumes python 2.7 by default:
@@ -280,9 +406,35 @@ FreeBSD
 Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

-- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
+- Ports: ``cd /usr/ports/net-im/py-matrix-synapse && make install clean``
 - Packages: ``pkg install py27-matrix-synapse``


+OpenBSD
+-------
+
+There is currently no port for OpenBSD. Additionally, OpenBSD's security
+settings require a slightly more difficult installation process.
+
+1) Create a new directory in ``/usr/local`` called ``_synapse``. Also, create a
+   new user called ``_synapse`` and set that directory as the new user's home.
+   This is required because, by default, OpenBSD only allows binaries which need
+   write and execute permissions on the same memory space to be run from
+   ``/usr/local``.
+2) ``su`` to the new ``_synapse`` user and change to their home directory.
+3) Create a new virtualenv: ``virtualenv -p python2.7 ~/.synapse``
+4) Source the virtualenv configuration located at
+   ``/usr/local/_synapse/.synapse/bin/activate``. This is done in ``ksh`` by
+   using the ``.`` command, rather than ``bash``'s ``source``.
+5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
+   webpages for their titles.
+6) Use ``pip`` to install this repository: ``pip install
+   https://github.com/matrix-org/synapse/tarball/master``
+7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
+   chance of a compromised Synapse server being used to take over your box.
+
+After this, you may proceed with the rest of the install directions.
+
 NixOS
 -----
@@ -322,6 +474,7 @@ Troubleshooting:
 you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
 it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``

+
 Troubleshooting
 ===============
@@ -385,6 +538,30 @@ fix try re-installing from PyPI or directly from
     # Install from github
     pip install --user https://github.com/pyca/pynacl/tarball/master

+Running out of File Handles
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If synapse runs out of filehandles, it typically fails badly - live-locking
+at 100% CPU, and/or failing to accept new TCP connections (blocking the
+connecting client). Matrix currently can legitimately use a lot of file handles,
+thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
+servers. The first time a server talks in a room it will try to connect
+simultaneously to all participating servers, which could exhaust the available
+file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
+to respond. (We need to improve the routing algorithm used to be better than
+full mesh, but as of June 2017 this hasn't happened yet).
+
+If you hit this failure mode, we recommend increasing the maximum number of
+open file handles to be at least 4096 (assuming a default of 1024 or 256).
+This is typically done by editing ``/etc/security/limits.conf``.
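As a sketch of what such an edit might look like - the ``synapse`` user name here is an assumption, so substitute whichever account the homeserver actually runs as::

    # /etc/security/limits.conf
    synapse soft nofile 4096
    synapse hard nofile 4096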
+Separately, Synapse may leak file handles if inbound HTTP requests get stuck
+during processing - e.g. blocked behind a lock or talking to a remote server etc.
+This is best diagnosed by matching up the 'Received request' and 'Processed request'
+log lines and looking for any 'Processed request' lines which take more than
+a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
+you see this failure mode so we can help debug it, however.
+
 ArchLinux
 ~~~~~~~~~
@@ -395,37 +572,6 @@ you will need to explicitly call Python2.7 - either running as::
 ...or by editing synctl with the correct python executable.

-Synapse Development
-===================
-
-To check out a synapse for development, clone the git repo into a working
-directory of your choice::
-
-    git clone https://github.com/matrix-org/synapse.git
-    cd synapse
-
-Synapse has a number of external dependencies, that are easiest
-to install using pip and a virtualenv::
-
-    virtualenv env
-    source env/bin/activate
-    python synapse/python_dependencies.py | xargs -n1 pip install
-    pip install setuptools_trial mock
-
-This will run a process of downloading and installing all the needed
-dependencies into a virtual env.
-
-Once this is done, you may wish to run Synapse's unit tests, to
-check that everything is installed as it should be::
-
-    python setup.py test
-
-This should end with a 'PASSED' result::
-
-    Ran 143 tests in 0.601s
-
-    PASSED (successes=143)
-
-
 Upgrading an existing Synapse
 =============================
@@ -436,143 +582,259 @@ versions of synapse.
 .. _UPGRADE.rst: UPGRADE.rst

+.. _federation:
+
 Setting up Federation
 =====================

-In order for other homeservers to send messages to your server, it will need to
-be publicly visible on the internet, and they will need to know its host name.
-You have two choices here, which will influence the form of your Matrix user
-IDs:
-
-1) Use the machine's own hostname as available on public DNS in the form of
-   its A records. This is easier to set up initially, perhaps for
-   testing, but lacks the flexibility of SRV.
-
-2) Set up a SRV record for your domain name. This requires you create a SRV
-   record in DNS, but gives the flexibility to run the server on your own
-   choice of TCP port, on a machine that might not be the same name as the
-   domain name.
-
-For the first form, simply pass the required hostname (of the machine) as the
---server-name parameter::
+Federation is the process by which users on different servers can participate
+in the same room. For this to work, those other servers must be able to contact
+yours to send messages.
+
+As explained in `Configuring synapse`_, the ``server_name`` in your
+``homeserver.yaml`` file determines the way that other servers will reach
+yours. By default, they will treat it as a hostname and try to connect to
+port 8448. This is easy to set up and will work with the default configuration,
+provided you set the ``server_name`` to match your machine's public DNS
+hostname.
+
+For a more flexible configuration, you can set up a DNS SRV record. This allows
+you to run your server on a machine that might not have the same name as your
+domain name. For example, you might want to run your server at
+``synapse.example.com``, but have your Matrix user-ids look like
+``@user:example.com``. (A SRV record also allows you to change the port from
+the default 8448. However, if you are thinking of using a reverse-proxy on the
+federation port, which is not recommended, be sure to read
+`Reverse-proxying the federation port`_ first.)
+
+To use a SRV record, first create your SRV record and publish it in DNS. This
+should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
+<synapse.server.name>``. The DNS record should then look something like::
+
+    $ dig -t srv _matrix._tcp.example.com
+    _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
+
+You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
+its user-ids, by setting ``server_name``::

     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name <yourdomain.com> \
         --config-path homeserver.yaml \
         --generate-config
     python -m synapse.app.homeserver --config-path homeserver.yaml

-Alternatively, you can run ``synctl start`` to guide you through the process.
-
-For the second form, first create your SRV record and publish it in DNS. This
-needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
-and port where the server is running. (At the current time synapse does not
-support clustering multiple servers into a single logical homeserver). The DNS
-record would then look something like::
-
-    $ dig -t srv _matrix._tcp.machine.my.domain.name
-    _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
-
-At this point, you should then run the homeserver with the hostname of this
-SRV record, as that is the name other machines will expect it to have::
-
-    python -m synapse.app.homeserver \
-        --server-name YOURDOMAIN \
-        --config-path homeserver.yaml \
-        --generate-config
-    python -m synapse.app.homeserver --config-path homeserver.yaml
-
-If you've already generated the config file, you need to edit the "server_name"
-in you ```homeserver.yaml``` file. If you've already started Synapse and a
+If you've already generated the config file, you need to edit the ``server_name``
+in your ``homeserver.yaml`` file. If you've already started Synapse and a
 database has been created, you will have to recreate the database.

-You may additionally want to pass one or more "-v" options, in order to
-increase the verbosity of logging output; at least for initial testing.
+If all goes well, you should be able to `connect to your server with a client`__,
+and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
+step. "Matrix HQ"'s sheer size and activity level tends to make even the
+largest boxes pause for thought.)
+
+.. __: `Connecting to Synapse from a client`_
+
+Troubleshooting
+---------------
+
+You can use the federation tester to check if your homeserver is all set:
+``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``
+If any of the attributes under "checks" is false, federation won't work.
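Since the tester returns JSON, it is also easy to drive from the command line; ``example.com`` below is a placeholder for your own server name, and the pipe through ``python -m json.tool`` is merely for readability::

    $ curl -s "https://matrix.org/federationtester/api/report?server_name=example.com" \
          | python -m json.tool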
+
+The typical failure mode with federation is that when you try to join a room,
+it is rejected with "401: Unauthorized". Generally this means that other
+servers in the room couldn't access yours. (Joining a room over federation is a
+complicated dance which requires connections in both directions).
+
+So, things to check are:
+
+* If you are trying to use a reverse-proxy, read `Reverse-proxying the
+  federation port`_.
+* If you are not using a SRV record, check that your ``server_name`` (the part
+  of your user-id after the ``:``) matches your hostname, and that port 8448 on
+  that hostname is reachable from outside your network.
+* If you *are* using a SRV record, check that it matches your ``server_name``
+  (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
+  it specifies are reachable from outside your network.
+
 Running a Demo Federation of Synapses
 -------------------------------------

 If you want to get up and running quickly with a trio of homeservers in a
-private federation (``localhost:8080``, ``localhost:8081`` and
-``localhost:8082``) which you can then access through the webclient running at
-http://localhost:8080. Simply run::
-
-    demo/start.sh
-
-This is mainly useful just for development purposes.
-
-Running The Demo Web Client
-===========================
-
-The homeserver runs a web client by default at https://localhost:8448/.
-
-If this is the first time you have used the client from that browser (it uses
-HTML5 local storage to remember its config), you will need to log in to your
-account. If you don't yet have an account, because you've just started the
-homeserver for the first time, then you'll need to register one.
-
-Registering A New Account
--------------------------
-
-Your new user name will be formed partly from the hostname your server is
-running as, and partly from a localpart you specify when you create the
-account. Your name will take the form of::
-
-    @localpart:my.domain.here
-
-(pronounced "at localpart on my dot domain dot here")
-
-Specify your desired localpart in the topmost box of the "Register for an
-account" form, and click the "Register" button. Hostnames can contain ports if
-required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
-internal synapse sandbox running on localhost).
-
-If registration fails, you may need to enable it in the homeserver (see
-`Synapse Installation`_ above)
+private federation, there is a script in the ``demo`` directory. This is mainly
+useful just for development purposes. See `<demo/README>`_.
+
+Using PostgreSQL
+================
+
+As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+traditionally used for convenience and simplicity.
+
+The advantages of Postgres include:
+
+* significant performance improvements due to the superior threading and
+  caching model, smarter query optimiser
+* allowing the DB to be run on separate hardware
+* allowing basic active/backup high-availability with a "hot spare" synapse
+  pointing at the same DB master, as well as enabling DB replication in
+  synapse itself.
+
+For information on how to install and use PostgreSQL, please see
+`docs/postgres.rst <docs/postgres.rst>`_.
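``docs/postgres.rst`` has the authoritative walkthrough; purely as a sketch of the usual first step, creating a suitably-configured database tends to look something like the following (the user and database names are assumptions)::

    $ su - postgres
    $ createuser --pwprompt synapse_user
    $ createdb --encoding=UTF8 --locale=C --template=template0 \
          --owner=synapse_user synapse

The ``C`` locale and ``template0`` are the important parts: Synapse expects a UTF8 database with C collation.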
Logging In To An Existing Account
|
.. _reverse-proxy:
|
||||||
---------------------------------
|
|
||||||
|
Using a reverse proxy with Synapse
|
||||||
|
==================================
|
||||||
|
|
||||||
|
It is recommended to put a reverse proxy such as
|
||||||
|
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
||||||
|
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
|
||||||
|
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||||
|
doing so is that it means that you can expose the default https port (443) to
|
||||||
|
Matrix clients without needing to run Synapse with root privileges.
|
||||||
|
|
||||||
|
The most important thing to know here is that Matrix clients and other Matrix
|
||||||
|
servers do not necessarily need to connect to your server via the same
|
||||||
|
port. Indeed, clients will use port 443 by default, whereas servers default to
|
||||||
|
port 8448. Where these are different, we refer to the 'client port' and the
|
||||||
|
'federation port'.
|
||||||
|
|
||||||
|
The next most important thing to know is that using a reverse-proxy on the
|
||||||
|
federation port has a number of pitfalls. It is possible, but be sure to read
|
||||||
|
`Reverse-proxying the federation port`_.
|
||||||
|
|
||||||
|
The recommended setup is therefore to configure your reverse-proxy on port 443
|
||||||
|
to port 8008 of synapse for client connections, but to also directly expose port
|
||||||
|
8448 for server-server connections. All the Matrix endpoints begin ``/_matrix``,
|
||||||
|
so an example nginx configuration might look like::
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 443 ssl;
|
||||||
|
listen [::]:443 ssl;
|
||||||
|
server_name matrix.example.com;
|
||||||
|
|
||||||
|
location /_matrix {
|
||||||
|
proxy_pass http://localhost:8008;
|
||||||
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
|
||||||
|
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
|
||||||
|
recorded correctly.
|
||||||
|
|
||||||
|
Having done so, you can then use ``https://matrix.example.com`` (instead of
|
||||||
|
``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
|
||||||
|
Synapse from a client`_.
|
||||||
|
|
||||||
|
Reverse-proxying the federation port
------------------------------------

There are two issues to consider before using a reverse-proxy on the federation
port:

* Due to the way SSL certificates are managed in the Matrix federation protocol
  (see `spec`__), Synapse needs to be configured with the path to the SSL
  certificate, *even if you do not terminate SSL at Synapse*.

  .. __: `key_management`_

* Synapse does not currently support SNI on the federation protocol
  (`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_), which
  means that using name-based virtual hosting is unreliable.

Furthermore, a number of the normal reasons for using a reverse-proxy do not
apply:

* Other servers will connect on port 8448 by default, so there is no need to
  listen on port 443 (for federation, at least), which avoids the need for root
  privileges and virtual hosting.

* A self-signed SSL certificate is fine for federation, so there is no need to
  automate renewals. (The certificate generated by ``--generate-config`` is
  valid for 10 years.)

If you want to set up a reverse-proxy on the federation port despite these
caveats, you will need to do the following:

* In ``homeserver.yaml``, set ``tls_certificate_path`` to the path to the SSL
  certificate file used by your reverse-proxy, and set ``no_tls`` to ``True``.
  (``tls_private_key_path`` will be ignored if ``no_tls`` is ``True``.)

* In your reverse-proxy configuration:

  * If there are other virtual hosts on the same port, make sure that the
    *default* one uses the certificate configured above.

  * Forward ``/_matrix`` to Synapse.

* If your reverse-proxy is not listening on port 8448, publish a SRV record to
  tell other servers how to find you, as sketched below. See `Setting up
  Federation`_.
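
As an illustration only (zone-file syntax varies by DNS provider), an SRV
record pointing federation traffic for ``example.com`` at a reverse-proxy
listening on port 443 might look like::

    _matrix._tcp.example.com. 3600 IN SRV 10 0 443 matrix.example.com.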

When updating the SSL certificate, just update the file pointed to by
``tls_certificate_path``: there is no need to restart synapse. (You may like to
use a symbolic link to help make this process atomic.)

The most common mistake when setting up federation is not to tell Synapse about
your SSL certificate. To check it, you can visit
``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``.
Unfortunately, there is no UI for this yet, but you should see
``"MatchingTLSFingerprint": true``. If not, check that
``Certificates[0].SHA256Fingerprint`` (the fingerprint of the certificate
presented by your reverse-proxy) matches ``Keys.tls_fingerprints[0].sha256``
(the fingerprint of the certificate Synapse is using).
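
For example, one way to pull out the relevant field from the command line (a
sketch, assuming ``curl`` is available; the report format may change)::

    curl -s "https://matrix.org/federationtester/api/report?server_name=matrix.example.com" \
        | grep MatchingTLSFingerprint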

Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
the form and click the Login button.

Identity Servers
================

Identity servers have the job of mapping email addresses and other 3rd Party
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
before creating that mapping.

**They are not where accounts or credentials are stored - these live on home
servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**

This process is very security-sensitive, as there is obvious risk of spam if it
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
term, we hope to create a decentralised system to manage it (`matrix-doc #712
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
the role of managing trusted identity in the Matrix ecosystem is farmed out to
a cluster of known trusted ecosystem partners, who run 'Matrix Identity
Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
is purely to authenticate and track 3PID logins and publish end-user public
keys.

You can host your own copy of Sydent, but this will prevent you reaching other
users in the Matrix ecosystem via their email address, and prevent them finding
you. We therefore recommend that you use one of the centralised identity servers
at ``https://matrix.org`` or ``https://vector.im`` for now.

To reiterate: the Identity server will only be used if you choose to associate
an email address with your account, or send an invite to another user via their
email address.

URL Previews
============

Synapse 0.15.0 introduces a new API for previewing URLs at
``/_matrix/media/r0/preview_url``. This is disabled by default. To turn it on
you must enable the ``url_preview_enabled: True`` config parameter and
explicitly specify the IP ranges that Synapse is not allowed to spider for
previewing in the ``url_preview_ip_range_blacklist`` configuration parameter.
This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.
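
A minimal sketch of the relevant ``homeserver.yaml`` settings, blacklisting
loopback and the RFC1918 private ranges (adjust to your own network)::

    url_preview_enabled: True
    url_preview_ip_range_blacklist:
      - '127.0.0.0/8'
      - '10.0.0.0/8'
      - '172.16.0.0/12'
      - '192.168.0.0/16'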

This also requires the optional lxml and netaddr python dependencies to be
installed. This in turn requires the libxml2 library to be available - on
Debian/Ubuntu this means ``apt-get install libxml2-dev``, or equivalent for
your OS.

Password reset
==============

@@ -583,25 +845,66 @@ server, they can request a password-reset token via clients such as Vector.

A manual password reset can be done via direct database access as follows.

First calculate the hash of the new password::

    $ source ~/.synapse/bin/activate
    $ ./scripts/hash_password
    Password:
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the ``users`` table in the database::

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';
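
For instance, with the default sqlite database the update can be applied
directly from the shell (a sketch; substitute your own database path, hash and
user ID)::

    sqlite3 homeserver.db \
        "UPDATE users SET password_hash='<hash>' WHERE name='@test:test.com';"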

Synapse Development
===================

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Installing from source`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::

    virtualenv -p python2.7 env
    source env/bin/activate
    python synapse/python_dependencies.py | xargs pip install
    pip install lxml mock

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

Once this is done, you may wish to run Synapse's unit tests, to
check that everything is installed as it should be::

    PYTHONPATH="." trial tests

This should end with a 'PASSED' result::

    Ran 143 tests in 0.601s

    PASSED (successes=143)

Running the Integration Tests
=============================

Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.

Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.

Building Internal API Documentation
===================================

@@ -617,7 +920,6 @@ Building internal API documentation::

    python setup.py build_sphinx

Help!! Synapse eats all my RAM!
===============================

@@ -626,10 +928,9 @@ cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory constrained environments, or increased if performance starts to
degrade.

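For example, to start synapse with half the default cache factor (an
illustrative value; tune to taste)::

    SYNAPSE_CACHE_FACTOR=0.25 synctl start
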
.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys


UPGRADE.rst
@@ -5,30 +5,60 @@ Before upgrading check if any special steps are required to upgrade from the
what you currently have installed to current version of synapse. The extra
instructions that may be required are listed later in this document.

1. If synapse was installed in a virtualenv then activate that virtualenv
   before upgrading. If synapse is installed in a virtualenv in
   ``~/.synapse/`` then run:

   .. code:: bash

       source ~/.synapse/bin/activate

2. If synapse was installed using pip then upgrade to the latest version by
   running:

   .. code:: bash

       pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master

       # restart synapse
       synctl restart

   If synapse was installed using git then upgrade to the latest version by
   running:

   .. code:: bash

       # Pull the latest version of the master branch.
       git pull
       # Update the versions of synapse's python dependencies.
       python synapse/python_dependencies.py | xargs pip install --upgrade

       # restart synapse
       ./synctl restart

To check whether your update was successful, you can check the Server header
returned by the Client-Server API:

.. code:: bash

    # replace <host.name> with the hostname of your synapse homeserver.
    # You may need to specify a port (eg, :8448) if your server is not
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
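
If the upgrade worked, the output should report the running version; for
example (version number illustrative)::

    Server: Synapse/0.26.0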

Upgrading to $NEXT_VERSION
==========================

This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and cpu use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.

We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact on the general community.

Upgrading to v0.15.0
====================

@@ -68,7 +98,7 @@ It has been replaced by specifying a list of application service registrations in
``homeserver.yaml``::

    app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]

Where ``registration-01.yaml`` looks like::

    url: <String>  # e.g. "https://my.application.service.com"
@@ -157,7 +187,7 @@ This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.

The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.

@@ -166,18 +196,18 @@ file and ask for help in #matrix:matrix.org. The upgrade process is,
unfortunately, non trivial and requires human intervention to resolve any
resulting conflicts during the upgrade process.

Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.::

    ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"

Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.

On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.

Upgrading to v0.4.0
===================
@@ -236,7 +266,7 @@ automatically generate default config use::

    --config-path homeserver.config \
    --generate-config

This config can be edited if desired, for example to specify a different SSL
certificate to use. Once done you can run the home server using::

    $ python synapse/app/homeserver.py --config-path homeserver.config
@@ -257,20 +287,20 @@ This release completely changes the database schema and so requires upgrading
it before starting the new version of the homeserver.

The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
database. This will save all user information, such as logins and profiles,
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.

Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.::

    ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"

Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
restart than usual as it reinitializes the database.

On startup of the new version, users can either rejoin remote rooms using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.
@@ -32,7 +32,7 @@ import urlparse

import nacl.signing
import nacl.encoding

from signedjson.sign import verify_signed_json, SignatureVerifyException

CONFIG_JSON = "cmdclient_config.json"
@@ -36,15 +36,13 @@ class HttpClient(object):
            the request body. This will be encoded as JSON.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body.
        """
        pass

    def get_json(self, url, args=None):
        """ Gets some json from the given host homeserver and path

        Args:
            url (str): The URL to GET data from.
@@ -54,10 +52,8 @@ class HttpClient(object):
            and *not* a string.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body.
        """
        pass

@@ -214,4 +210,4 @@ class _JsonProducer(object):
        pass

    def stopProducing(self):
        pass

contrib/example_log_config.yaml (new file)
@@ -0,0 +1,50 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.

version: 1

formatters:
  fmt:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  # example output to console
  console:
    class: logging.StreamHandler
    filters: [context]

  # example output to file - to enable, edit 'root' config below.
  file:
    class: logging.handlers.RotatingFileHandler
    formatter: fmt
    filename: /var/log/synapse/homeserver.log
    maxBytes: 100000000
    backupCount: 3
    filters: [context]


root:
  level: INFO
  handlers: [console] # to use file handler instead, switch to [file]

loggers:
  synapse:
    level: INFO

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: INFO

  # example of enabling debugging for a component:
  #
  # synapse.federation.transport.server:
  #   level: DEBUG

contrib/prometheus/README (new file)
@@ -0,0 +1,37 @@
This directory contains some sample monitoring config for using the
'Prometheus' monitoring server against synapse.

To use it, first install prometheus by following the instructions at

    http://prometheus.io/

### for Prometheus v1
Add a new job to the main prometheus.conf file:

    job: {
      name: "synapse"

      target_group: {
        target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
      }
    }

### for Prometheus v2
Add a new job to the main prometheus.yml file:

    - job_name: "synapse"
      metrics_path: "/_synapse/metrics"
      # when endpoint uses https:
      scheme: "https"

      static_configs:
        - targets: ['SERVER.LOCATION:PORT']

To use `synapse.rules` add

    rule_files:
      - "/PATH/TO/synapse-v2.rules"

Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a
command-line option.
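
For the config-file route, a minimal sketch of the corresponding setting in
homeserver.yaml (option name assumed to match the CLI flag above):

    enable_metrics: true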

contrib/prometheus/consoles/synapse.html (new file)
@@ -0,0 +1,395 @@
{{ template "head" . }}

{{ template "prom_content_head" . }}
<h1>System Resources</h1>

<h3>CPU</h3>
<div id="process_resource_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_utime"),
  expr: "rate(process_cpu_seconds_total[2m]) * 100",
  name: "[[job]]",
  min: 0,
  max: 100,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "%",
  yTitle: "CPU Usage"
})
</script>

<h3>Memory</h3>
<div id="process_resource_maxrss"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_resource_maxrss"),
  expr: "process_psutil_rss:max",
  name: "Maxrss",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "bytes",
  yTitle: "Usage"
})
</script>

<h3>File descriptors</h3>
<div id="process_fds"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#process_fds"),
  expr: "process_open_fds{job='synapse'}",
  name: "FDs",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "",
  yTitle: "Descriptors"
})
</script>

<h1>Reactor</h1>

<h3>Total reactor time</h3>
<div id="reactor_total_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_total_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
  name: "time",
  max: 1,
  min: 0,
  renderer: "area",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Average reactor tick time</h3>
<div id="reactor_average_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_average_time"),
  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
  name: "time",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s",
  yTitle: "Time"
})
</script>

<h3>Pending calls per tick</h3>
<div id="reactor_pending_calls"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#reactor_pending_calls"),
  expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
  name: "calls",
  min: 0,
  renderer: "line",
  height: 150,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yTitle: "Pending Calls"
})
</script>

<h1>Storage</h1>

<h3>Queries</h3>
<div id="synapse_storage_query_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_query_time"),
  expr: "rate(synapse_storage_query_time:count[2m])",
  name: "[[verb]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "queries/s",
  yTitle: "Queries"
})
</script>

<h3>Transactions</h3>
<div id="synapse_storage_transaction_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transaction_time"),
  expr: "rate(synapse_storage_transaction_time:count[2m])",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "txn/s",
  yTitle: "Transactions"
})
</script>

<h3>Transaction execution time</h3>
<div id="synapse_storage_transactions_time_msec"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_transactions_time_msec"),
  expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
  name: "[[desc]]",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Database scheduling latency</h3>
<div id="synapse_storage_schedule_time"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_storage_schedule_time"),
  expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
  name: "Total latency",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "Usage"
})
</script>

<h3>Cache hit ratio</h3>
<div id="synapse_cache_ratio"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_ratio"),
  expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
  name: "[[name]]",
  min: 0,
  max: 100,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "%",
  yTitle: "Percentage"
})
</script>

<h3>Cache size</h3>
<div id="synapse_cache_size"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_cache_size"),
  expr: "synapse_util_caches_cache:size",
  name: "[[name]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Items"
})
</script>

<h1>Requests</h1>

<h3>Requests by Servlet</h3>
<div id="synapse_http_server_requests_servlet"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_requests_servlet"),
  expr: "rate(synapse_http_server_requests:servlet[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
<div id="synapse_http_server_requests_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_requests_servlet_minus_events"),
  expr: "rate(synapse_http_server_requests:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Average response times</h3>
<div id="synapse_http_server_response_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_time_avg"),
  expr: "rate(synapse_http_server_response_time:total[2m]) / rate(synapse_http_server_response_time:count[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h3>All responses by code</h3>
<div id="synapse_http_server_responses"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses"),
  expr: "rate(synapse_http_server_responses[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Error responses by code</h3>
<div id="synapse_http_server_responses_err"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_responses_err"),
  expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
  name: "[[method]] / [[code]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>


<h3>CPU Usage</h3>
<div id="synapse_http_server_response_ru_utime"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_ru_utime"),
  expr: "rate(synapse_http_server_response_ru_utime:total[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "CPU Usage"
})
</script>


<h3>DB Usage</h3>
<div id="synapse_http_server_response_db_txn_duration"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
  expr: "rate(synapse_http_server_response_db_txn_duration:total[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/s",
  yTitle: "DB Usage"
})
</script>


<h3>Average event send times</h3>
<div id="synapse_http_server_send_time_avg"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_send_time_avg"),
  expr: "rate(synapse_http_server_response_time:total{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_time:count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "s/req",
  yTitle: "Response time"
})
</script>

<h1>Federation</h1>

<h3>Sent Messages</h3>
<div id="synapse_federation_client_sent"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_client_sent"),
  expr: "rate(synapse_federation_client_sent[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Received Messages</h3>
<div id="synapse_federation_server_received"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_server_received"),
  expr: "rate(synapse_federation_server_received[2m])",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "req/s",
  yTitle: "Requests"
})
</script>

<h3>Pending</h3>
<div id="synapse_federation_transaction_queue_pending"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_federation_transaction_queue_pending"),
  expr: "synapse_federation_transaction_queue_pending",
  name: "[[type]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Units"
})
</script>

<h1>Clients</h1>

<h3>Notifiers</h3>
<div id="synapse_notifier_listeners"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_listeners"),
  expr: "synapse_notifier_listeners",
  name: "listeners",
  min: 0,
  yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
  yUnits: "",
  yTitle: "Listeners"
})
</script>

<h3>Notified Events</h3>
<div id="synapse_notifier_notified_events"></div>
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_notifier_notified_events"),
  expr: "rate(synapse_notifier_notified_events[2m])",
  name: "events",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
  yUnits: "events/s",
  yTitle: "Event rate"
})
</script>

{{ template "prom_content_tail" . }}

{{ template "tail" }}

contrib/prometheus/synapse-v1.rules (new file)
@@ -0,0 +1,21 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)

synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0

contrib/prometheus/synapse-v2.rules (new file)
@@ -0,0 +1,60 @@
groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_requests:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_requests) by (method)"
  - record: 'synapse_http_server_requests:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_requests) by (servlet)'

  - record: 'synapse_http_server_requests:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_requests:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_client_sent_queries) by (job)'

  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_server_received_queries) by (job)'

  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "EDU"
    expr: 'synapse_federation_transaction_queue_pending_edus + 0'
  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
@@ -1,5 +1,5 @@
# This assumes that Synapse has been installed as a system package
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.

[Unit]
@@ -9,9 +9,10 @@ Description=Synapse Matrix homeserver
Type=simple
User=synapse
Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml

[Install]
WantedBy=multi-user.target

@@ -10,13 +10,13 @@ https://developers.google.com/recaptcha/

Setting ReCaptcha Keys
----------------------
The keys are a config option on the home server config. If they are not
visible, you can generate them via --generate-config. Set the following values::

    recaptcha_public_key: YOUR_PUBLIC_KEY
    recaptcha_private_key: YOUR_PRIVATE_KEY

In addition, you MUST enable captchas via::

    enable_registration_captcha: true

@@ -25,7 +25,5 @@ Configuring IP used for auth
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
IP address. This can be configured using the ``x_forwarded`` directive in the
listeners section of the ``homeserver.yaml`` configuration file.


docs/admin_api/media_admin_api.md (new file)
@@ -0,0 +1,23 @@
# List all media in a room

This API gets a list of known media in a room.

The API is:
```
GET /_matrix/client/r0/admin/room/<room_id>/media
```
including an `access_token` of a server admin.

It returns a JSON body like the following:
```
{
  "local": [
    "mxc://localhost/xwvutsrqponmlkjihgfedcba",
    "mxc://localhost/abcdefghijklmnopqrstuvwx"
  ],
  "remote": [
    "mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
    "mxc://matrix.org/abcdefghijklmnopqrstuvwx"
  ]
}
```
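
For example, a hypothetical command-line invocation (server name and IDs are
placeholders; the access token is passed as a query parameter):

```
curl "https://matrix.example.com/_matrix/client/r0/admin/room/<room_id>/media?access_token=<access_token>"
```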
@@ -8,8 +8,56 @@ Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.

The API is:

``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``

including an ``access_token`` of a server admin.

By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
deleted.)

Room state data (such as joins, leaves, topic) is always preserved.

To delete local message events as well, set ``delete_local_events`` in the body:

.. code:: json

   {
       "delete_local_events": true
   }

The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
in milliseconds.

The API starts the purge running, and returns immediately with a JSON body with
a purge id:

.. code:: json

    {
        "purge_id": "<opaque id>"
    }

Purge status query
------------------

It is possible to poll for updates on recent purges with a second API:

``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``

(again, with a suitable ``access_token``). This API returns a JSON body like
the following:

.. code:: json

    {
        "status": "active"
    }

The status will be one of ``active``, ``complete``, or ``failed``.
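
As an illustrative command-line sketch (server name, IDs and token are
placeholders)::

    # kick off a purge up to a given event
    curl -X POST \
        "https://matrix.example.com/_matrix/client/r0/admin/purge_history/<room_id>/<event_id>?access_token=<access_token>"

    # poll for completion using the returned purge_id
    curl "https://matrix.example.com/_matrix/client/r0/admin/purge_history_status/<purge_id>?access_token=<access_token>"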
@@ -2,15 +2,13 @@ Purge Remote Media API
======================

The purge remote media API allows server admins to purge old cached remote
media.

The API is::

    POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>

    {}

Which will remove all cached media that was last accessed before
``<unix_timestamp_in_ms>``.
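
For example (a sketch; the server name and timestamp are placeholders)::

    curl -X POST --data '{}' \
        "https://matrix.example.com/_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>"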

docs/admin_api/user_admin_api.rst (new file)
@@ -0,0 +1,73 @@
Query Account
=============

This API returns information about a specific user account.

The api is::

    GET /_matrix/client/r0/admin/whois/<user_id>

including an ``access_token`` of a server admin.

It returns a JSON body like the following:

.. code:: json

    {
        "user_id": "<user_id>",
        "devices": {
            "": {
                "sessions": [
                    {
                        "connections": [
                            {
                                "ip": "1.2.3.4",
                                "last_seen": 1417222374433,
                                "user_agent": "Mozilla/5.0 ..."
                            },
                            {
                                "ip": "1.2.3.10",
                                "last_seen": 1417222374500,
                                "user_agent": "Dalvik/2.1.0 ..."
                            }
                        ]
                    }
                ]
            }
        }
    }

``last_seen`` is measured in milliseconds since the Unix epoch.

Deactivate Account
==================

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).

The api is::

    POST /_matrix/client/r0/admin/deactivate/<user_id>

including an ``access_token`` of a server admin, and an empty request body.


Reset password
==============

Changes the password of another user.

The api is::

    POST /_matrix/client/r0/admin/reset_password/<user_id>

with a body of:

.. code:: json

    {
        "new_password": "<secret>"
    }

including an ``access_token`` of a server admin.
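
An illustrative invocation (server name and token are placeholders)::

    curl -X POST --header 'Content-Type: application/json' \
        --data '{"new_password": "<secret>"}' \
        "https://matrix.example.com/_matrix/client/r0/admin/reset_password/<user_id>?access_token=<access_token>"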
@@ -1,52 +1,119 @@
- Everything should comply with PEP8. Code should pass
  ``pep8 --max-line-length=100`` without any warnings.

- **Indenting**:

  - NEVER tabs. 4 spaces to indent.

  - follow PEP8; either hanging indent or multiline-visual indent depending
    on the size and shape of the arguments and what makes more sense to the
    author. In other words, both this::

      print("I am a fish %s" % "moo")

    and this::

      print("I am a fish %s" %
            "moo")

    and this::

      print(
          "I am a fish %s" %
          "moo",
      )

    ...are valid, although given each one takes up 2x more vertical space than
    the previous, it's up to the author's discretion as to which layout makes
    most sense for their function invocation. (e.g. if they want to add
    comments per-argument, or put expressions in the arguments, or group
    related arguments together, or want to deliberately extend or preserve
    vertical/horizontal space)

- **Line length**:

  Max line length is 79 chars (with flexibility to overflow by a "few chars" if
  the overflowing content is not semantically significant and avoids an
  explosion of vertical whitespace).

  Use parentheses instead of ``\`` for line continuation wherever possible
  (which is pretty much everywhere).

- **Naming**:

  - Use camel case for class and type names
  - Use underscores for functions and variables.

- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.

- **Blank lines**:

  - There should be max a single new line between:

    - statements
    - functions in a class

  - There should be two new lines between:

    - definitions in a module (e.g., between different classes)

- **Whitespace**:

  There should be spaces where spaces should be and not where there shouldn't
  be:

  - a single space after a comma
  - a single space before and after for '=' when used as assignment
  - no spaces before and after for '=' for default values and keyword arguments.

- **Comments**: should follow the `google code style
  <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
  This is so that we can generate documentation with `sphinx
  <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
  `examples
  <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
  in the sphinx documentation.

- **Imports**:

  - Prefer to import classes and functions than packages or modules.

    Example::

      from synapse.types import UserID
      ...
      user_id = UserID(local, server)

    is preferred over::

      from synapse import types
      ...
      user_id = types.UserID(local, server)

    (or any other variant).

    This goes against the advice in the Google style guide, but it means that
    errors in the name are caught early (at import time).

  - Multiple imports from the same package can be combined onto one line::

      from synapse.types import GroupID, RoomID, UserID

    An effort should be made to keep the individual imports in alphabetical
    order.

    If the list becomes long, wrap it with parentheses and split it over
    multiple lines.

  - As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
    imports should be grouped in the following order, with a blank line between
    each group:

    1. standard library imports
    2. related third party imports
    3. local application/library specific imports

  - Imports within each group should be sorted alphabetically by module name.

  - Avoid wildcard imports (``from synapse.types import *``) and relative
    imports (``from .types import UserID``).
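As a concrete illustration of the import grouping rules above (the specific
module names are only examples)::

    # standard library imports
    import logging
    import os

    # related third party imports
    from twisted.internet import defer

    # local application/library specific imports
    from synapse.types import RoomID, UserID

    logger = logging.getLogger(__name__)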
@@ -1,10 +1,442 @@
Log contexts
============

.. contents::

To help track the processing of individual requests, synapse uses a
'log context' to track which request it is handling at any given moment. This
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
the information back out of the thread-local variable and add it to each log
record.

Logcontexts are also used for CPU and database accounting, so that we can track
which requests were responsible for high CPU use or database activity.

The ``synapse.util.logcontext`` module provides facilities for managing the
current log context (as well as providing the ``LoggingContextFilter`` class).

Deferreds make the whole thing complicated, so this document describes how it
all works, and how to write code which follows the rules.

Logcontexts without Deferreds
-----------------------------

In the absence of any Deferred voodoo, things are simple enough. As with any
code of this nature, the rule is that our function should leave things as it
found them:

.. code:: python

    from synapse.util import logcontext   # omitted from future snippets

    def handle_request(request_id):
        request_context = logcontext.LoggingContext()

        calling_context = logcontext.LoggingContext.current_context()
        logcontext.LoggingContext.set_current_context(request_context)
        try:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")
        finally:
            logcontext.LoggingContext.set_current_context(calling_context)

    def do_request_handling():
        logger.debug("phew")  # this will be logged against request_id

LoggingContext implements the context management methods, so the above can be
written much more succinctly as:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            do_request_handling()
            logger.debug("finished")

    def do_request_handling():
        logger.debug("phew")


Using logcontexts with Deferreds
--------------------------------

Deferreds — and in particular, ``defer.inlineCallbacks`` — break
the linear flow of code so that there is no longer a single entry point where
we should set the logcontext and a single exit point where we should remove it.

Consider the example above, where ``do_request_handling`` needs to do some
blocking operation, and returns a deferred:

.. code:: python

    @defer.inlineCallbacks
    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            yield do_request_handling()
            logger.debug("finished")

In the above flow:

* The logcontext is set
* ``do_request_handling`` is called, and returns a deferred
* ``handle_request`` yields the deferred
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred

So we have stopped processing the request (and will probably go on to start
processing the next), without clearing the logcontext.

To circumvent this problem, synapse code assumes that, wherever you have a
deferred, you will want to yield on it. To that end, wherever functions return
a deferred, we adopt the following conventions:

**Rules for functions returning deferreds:**

* If the deferred is already complete, the function returns with the same
  logcontext it started with.
* If the deferred is incomplete, the function clears the logcontext before
  returning; when the deferred completes, it restores the logcontext before
  running any callbacks.

That sounds complicated, but actually it means a lot of code (including the
example above) "just works". There are two cases:

* If ``do_request_handling`` returns a completed deferred, then the logcontext
  will still be in place. In this case, execution will continue immediately
  after the ``yield``; the "finished" line will be logged against the right
  context, and the ``with`` block restores the original context before we
  return to the caller.

* If the returned deferred is incomplete, ``do_request_handling`` clears the
  logcontext before returning. The logcontext is therefore clear when
  ``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
  wrapper adds a callback to the deferred, and returns another (incomplete)
  deferred to the caller, and it is safe to begin processing the next request.

  Once ``do_request_handling``'s deferred completes, it will reinstate the
  logcontext, before running the callback added by the ``inlineCallbacks``
  wrapper. That callback runs the second half of ``handle_request``, so again
  the "finished" line will be logged against the right
  context, and the ``with`` block restores the original context.

As an aside, it's worth noting that ``handle_request`` follows our rules -
though that only matters if the caller has its own logcontext which it cares
about.

The following sections describe pitfalls and helpful patterns when implementing
these rules.

Always yield your deferreds
---------------------------

Whenever you get a deferred back from a function, you should ``yield`` on it
as soon as possible. (Returning it directly to your caller is ok too, if you're
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
call any other functions.

.. code:: python

    @defer.inlineCallbacks
    def fun():
        logger.debug("starting")
        yield do_some_stuff()       # just like this

        d = more_stuff()
        result = yield d            # also fine, of course

        defer.returnValue(result)

    def nonInlineCallbacksFun():
        logger.debug("just a wrapper really")
        return do_some_stuff()      # this is ok too - the caller will yield on
                                    # it anyway.

Provided this pattern is followed all the way back up the callchain to where
the logcontext was set, this will make things work out ok: provided
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.

It's all too easy to forget to ``yield``: for instance if we forgot that
``do_some_stuff`` returned a deferred, we might plough on regardless. This
leads to a mess; it will probably work itself out eventually, but not before
a load of stuff has been logged against the wrong context. (Normally, other
things will break, more obviously, if you forget to ``yield``, so this tends
not to be a major problem in practice.)

Of course sometimes you need to do something a bit fancier with your Deferreds
- not all code follows the linear A-then-B-then-C pattern. Notes on
implementing more complex patterns are in later sections.

Where you create a new Deferred, make it follow the rules
---------------------------------------------------------

Most of the time, a Deferred comes from another synapse function. Sometimes,
though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.

The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:

.. code:: python

    # not a logcontext-rules-compliant function
    def get_sleep_deferred(seconds):
        d = defer.Deferred()
        reactor.callLater(seconds, d.callback, None)
        return d

That doesn't follow the rules, but we can fix it by wrapping it with
``PreserveLoggingContext`` and ``yield`` ing on it:

.. code:: python

    @defer.inlineCallbacks
    def sleep(seconds):
        with PreserveLoggingContext():
            yield get_sleep_deferred(seconds)

This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.

You can also use ``logcontext.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:

.. code:: python

    def sleep(seconds):
        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))


Fire-and-forget
---------------

Sometimes you want to fire off a chain of execution, but not wait for its
result. That might look a bit like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # *don't* do this
        background_operation()

        logger.debug("Request handling complete")

    @defer.inlineCallbacks
    def background_operation():
        yield first_background_step()
        logger.debug("Completed first step")
        yield second_background_step()
        logger.debug("Completed second step")

The above code does a couple of steps in the background after
``do_request_handling`` has finished. The log lines are still logged against
the ``request_context`` logcontext, which may or may not be desirable. There
are two big problems with the above, however. The first problem is that, if
``background_operation`` returns an incomplete Deferred, it will expect its
caller to ``yield`` immediately, so will have cleared the logcontext. In this
example, that means that 'Request handling complete' will be logged without any
context.

The second problem, which is potentially even worse, is that when the Deferred
returned by ``background_operation`` completes, it will restore the original
logcontext. There is nothing waiting on that Deferred, so the logcontext will
leak into the reactor and possibly get attached to some arbitrary future
operation.

There are two potential solutions to this.

One option is to surround the call to ``background_operation`` with a
``PreserveLoggingContext`` call. That will reset the logcontext before
starting ``background_operation`` (so the context restored when the deferred
completes will be the empty logcontext), and will restore the current
logcontext before continuing the foreground process:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        # start background_operation off in the empty logcontext, to
        # avoid leaking the current context into the reactor.
        with PreserveLoggingContext():
            background_operation()

        # this will now be logged against the request context
        logger.debug("Request handling complete")

Obviously that option means that the operations done in
``background_operation`` would not be logged against a logcontext (though
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).

The second option is to use ``logcontext.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
about logcontexts and Deferreds into one which behaves more like an external
function — the opposite operation to that described in the previous section.
It can be used like this:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        yield foreground_operation()

        logcontext.run_in_background(background_operation)

        # this will now be logged against the request context
        logger.debug("Request handling complete")


Passing synapse deferreds into third-party functions
----------------------------------------------------

A typical example of this is where we want to collect together two or more
deferreds via ``defer.gatherResults``:

.. code:: python

    d1 = operation1()
    d2 = operation2()
    d3 = defer.gatherResults([d1, d2])

This is really a variation of the fire-and-forget problem above, in that we are
firing off ``d1`` and ``d2`` without yielding on them. The difference
is that we now have third-party code attached to their callbacks. Anyway either
technique given in the `Fire-and-forget`_ section will work.

Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
in order to make it follow the logcontext rules before we can yield it, as
described in `Where you create a new Deferred, make it follow the rules`_.

So, option one: reset the logcontext before starting the operations to be
gathered:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        with PreserveLoggingContext():
            d1 = operation1()
            d2 = operation2()
            result = yield defer.gatherResults([d1, d2])

In this case particularly, though, option two, of using
``logcontext.preserve_fn`` almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:

.. code:: python

    @defer.inlineCallbacks
    def do_request_handling():
        d1 = logcontext.preserve_fn(operation1)()
        d2 = logcontext.preserve_fn(operation2)()

        with PreserveLoggingContext():
            result = yield defer.gatherResults([d1, d2])


Was all this really necessary?
------------------------------

The conventions used work fine for a linear flow where everything happens in
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
follow for any more exotic flows. It's hard not to wonder if we could have done
something else.

We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an alternative
approach.

I briefly prototyped some code following an alternative set of rules. I think
it would work, but I certainly didn't get as far as thinking how it would
interact with concepts as complicated as the cache descriptors.

My alternative rules were:

* functions always preserve the logcontext of their caller, whether or not they
  are returning a Deferred.

* Deferreds returned by synapse functions run their callbacks in the same
  context as the function was originally called in.

The main point of this scheme is that everywhere that sets the logcontext is
responsible for clearing it before returning control to the reactor.

So, for example, if you were the function which started a ``with
LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
off the background process, and then leave the ``with`` block to wait for it:

.. code:: python

    def handle_request(request_id):
        with logcontext.LoggingContext() as request_context:
            request_context.request = request_id
            d = do_request_handling()

        def cb(r):
            logger.debug("finished")

        d.addCallback(cb)
        return d

(in general, mixing ``with LoggingContext`` blocks and
``defer.inlineCallbacks`` in the same function leads to slightly
counter-intuitive code, under this scheme).

Because we leave the original ``with`` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today), the
logcontext is cleared before control passes back to the reactor; so if there is
some code within ``do_request_handling`` which needs to wait for a Deferred to
complete, there is no need for it to worry about clearing the logcontext before
doing so:

.. code:: python

    def handle_request():
        r = do_some_stuff()
        r.addCallback(do_some_more_stuff)
        return r

— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
runs its callbacks in the original logcontext, all is happy.

The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve — we have it today, in the shape of
``logcontext._PreservingContextDeferred``:

.. code:: python

    def do_some_stuff():
        deferred = do_some_io()
        pcd = _PreservingContextDeferred(LoggingContext.current_context())
        deferred.chainDeferred(pcd)
        return pcd

It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
on has that property. So we can just write:

.. code:: python

    @defer.inlineCallbacks
    def handle_request():
        yield do_some_stuff()
        yield do_some_more_stuff()

To conclude: I think this scheme would have worked equally well, with less
danger of messing it up, and probably made some more esoteric code easier to
write. But again — changing the conventions of the entire Synapse codebase is
not a sensible option for the marginal improvement offered.
@@ -1,50 +1,115 @@
How to monitor Synapse metrics using Prometheus
===============================================

1. Install prometheus:

   Follow instructions at http://prometheus.io/docs/introduction/install/

2. Enable synapse metrics:

   Simply setting a (local) port number will enable it. Pick a port.
   prometheus itself defaults to 9090, so starting just above that for
   locally monitored services seems reasonable. E.g. 9092:

   Add to homeserver.yaml::

     metrics_port: 9092

   Also ensure that ``enable_metrics`` is set to ``True``.

   Restart synapse.

3. Add a prometheus target for synapse.

   It needs to set the ``metrics_path`` to a non-default value (under
   ``scrape_configs``)::

    - job_name: "synapse"
      metrics_path: "/_synapse/metrics"
      static_configs:
        - targets: ["my.server.here:9092"]

   If your prometheus is older than 1.5.2, you will need to replace
   ``static_configs`` in the above with ``target_groups``.

   Restart prometheus.
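To sanity-check the setup, you can fetch the metrics endpoint directly and
look for synapse-specific series; a minimal Python sketch (the host and port
are whatever you configured above)::

    import requests

    # Fetch the raw prometheus-format metrics exposed by synapse.
    resp = requests.get("http://my.server.here:9092/_synapse/metrics")
    resp.raise_for_status()

    # Print a few synapse-specific metric lines as a smoke test.
    for line in resp.text.splitlines():
        if line.startswith("synapse_"):
            print(line)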
Block and response metrics renamed for 0.27.0
---------------------------------------------

Synapse 0.27.0 begins the process of rationalising the duplicate ``*:count``
metrics reported for the resource tracking for code blocks and HTTP requests.

At the same time, the corresponding ``*:total`` metrics are being renamed, as
the ``:total`` suffix no longer makes sense in the absence of a corresponding
``:count`` metric.

To enable a graceful migration path, this release just adds new names for the
metrics being renamed. A future release will remove the old ones.

The following table shows the new metrics, and the old metrics which they are
replacing.

==================================================== ===================================================
New name                                             Old name
==================================================== ===================================================
synapse_util_metrics_block_count                     synapse_util_metrics_block_timer:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_utime:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_ru_stime:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_count:count
synapse_util_metrics_block_count                     synapse_util_metrics_block_db_txn_duration:count

synapse_util_metrics_block_time_seconds              synapse_util_metrics_block_timer:total
synapse_util_metrics_block_ru_utime_seconds          synapse_util_metrics_block_ru_utime:total
synapse_util_metrics_block_ru_stime_seconds          synapse_util_metrics_block_ru_stime:total
synapse_util_metrics_block_db_txn_count              synapse_util_metrics_block_db_txn_count:total
synapse_util_metrics_block_db_txn_duration_seconds   synapse_util_metrics_block_db_txn_duration:total

synapse_http_server_response_count                   synapse_http_server_requests
synapse_http_server_response_count                   synapse_http_server_response_time:count
synapse_http_server_response_count                   synapse_http_server_response_ru_utime:count
synapse_http_server_response_count                   synapse_http_server_response_ru_stime:count
synapse_http_server_response_count                   synapse_http_server_response_db_txn_count:count
synapse_http_server_response_count                   synapse_http_server_response_db_txn_duration:count

synapse_http_server_response_time_seconds            synapse_http_server_response_time:total
synapse_http_server_response_ru_utime_seconds        synapse_http_server_response_ru_utime:total
synapse_http_server_response_ru_stime_seconds        synapse_http_server_response_ru_stime:total
synapse_http_server_response_db_txn_count            synapse_http_server_response_db_txn_count:total
synapse_http_server_response_db_txn_duration_seconds synapse_http_server_response_db_txn_duration:total
==================================================== ===================================================


Standard Metric Names
---------------------

As of synapse version 0.18.2, the format of the process-wide metrics has been
changed to fit prometheus standard naming conventions. Additionally the units
have been changed to seconds, from milliseconds.

================================== =============================
New name                           Old name
================================== =============================
process_cpu_user_seconds_total     process_resource_utime / 1000
process_cpu_system_seconds_total   process_resource_stime / 1000
process_open_fds (no 'type' label) process_fds
================================== =============================

The python-specific counts of garbage collector performance have been renamed.

=========================== ======================
New name                    Old name
=========================== ======================
python_gc_time              reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts            reactor_gc_counts
=========================== ======================

The twisted-specific reactor metrics have been renamed.

==================================== =====================
New name                             Old name
==================================== =====================
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time     reactor_tick_time
==================================== =====================
99  docs/password_auth_providers.rst  Normal file
@@ -0,0 +1,99 @@
Password auth provider modules
==============================

Password auth providers offer a way for server administrators to integrate
their Synapse installation with an existing authentication system.

A password auth provider is a Python class which is dynamically loaded into
Synapse, and provides a number of methods by which it can integrate with the
authentication system.

This document serves as a reference for those looking to implement their own
password auth providers.

Required methods
----------------

Password auth provider classes must provide the following methods:

*class* ``SomeProvider.parse_config``\(*config*)

    This method is passed the ``config`` object for this module from the
    homeserver configuration file.

    It should perform any appropriate sanity checks on the provided
    configuration, and return an object which is then passed into ``__init__``.

*class* ``SomeProvider``\(*config*, *account_handler*)

    The constructor is passed the config object returned by ``parse_config``,
    and a ``synapse.module_api.ModuleApi`` object which allows the
    password provider to check if accounts exist and/or create new ones.

Optional methods
----------------

Password auth provider classes may optionally provide the following methods.

*class* ``SomeProvider.get_db_schema_files``\()

    This method, if implemented, should return an Iterable of ``(name,
    stream)`` pairs of database schema files. Each file is applied in turn at
    initialisation, and a record is then made in the database so that it is
    not re-applied on the next start.

``someprovider.get_supported_login_types``\()

    This method, if implemented, should return a ``dict`` mapping from a login
    type identifier (such as ``m.login.password``) to an iterable giving the
    fields which must be provided by the user in the submission to the
    ``/login`` api. These fields are passed in the ``login_dict`` dictionary
    to ``check_auth``.

    For example, if a password auth provider wants to implement a custom login
    type of ``com.example.custom_login``, where the client is expected to pass
    the fields ``secret1`` and ``secret2``, the provider should implement this
    method and return the following dict::

        {"com.example.custom_login": ("secret1", "secret2")}

``someprovider.check_auth``\(*username*, *login_type*, *login_dict*)

    This method is the one that does the real work. If implemented, it will be
    called for each login attempt where the login type matches one of the keys
    returned by ``get_supported_login_types``.

    It is passed the (possibly unqualified) ``user`` provided by the client,
    the login type, and a dictionary of login secrets passed by the client.

    The method should return a Twisted ``Deferred`` object, which resolves to
    the canonical ``@localpart:domain`` user id if authentication is successful,
    and ``None`` if not.

    Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in
    which case the second field is a callback which will be called with the
    result from the ``/login`` call (including ``access_token``, ``device_id``,
    etc.)

``someprovider.check_password``\(*user_id*, *password*)

    This method provides a simpler interface than ``get_supported_login_types``
    and ``check_auth`` for password auth providers that just want to provide a
    mechanism for validating ``m.login.password`` logins.

    If implemented, it will be called to check logins with an
    ``m.login.password`` login type. It is passed a qualified
    ``@localpart:domain`` user id, and the password provided by the user.

    The method should return a Twisted ``Deferred`` object, which resolves to
    ``True`` if authentication is successful, and ``False`` if not.

``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*)

    This method, if implemented, is called when a user logs out. It is passed
    the qualified user ID, the ID of the deactivated device (if any: access
    tokens are occasionally created without an associated device ID), and the
    (now deactivated) access token.

    It may return a Twisted ``Deferred`` object; the logout request will wait
    for the deferred to complete but the result is ignored.
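To make the shape of the interface concrete, here is a minimal sketch of a
provider. The hard-coded user dict is purely illustrative, and the config
layout is an assumption rather than anything mandated by Synapse::

    from twisted.internet import defer

    class DictPasswordProvider(object):
        """A toy provider validating m.login.password logins against a
        static dict of users configured in homeserver.yaml."""

        def __init__(self, config, account_handler):
            # account_handler is the synapse.module_api.ModuleApi object
            self.account_handler = account_handler
            self.users = config["users"]  # e.g. {"@alice:example.com": "s3cret"}

        @staticmethod
        def parse_config(config):
            # sanity-check the config stanza from homeserver.yaml
            if "users" not in config:
                raise Exception("'users' is required for this provider")
            return config

        def check_password(self, user_id, password):
            # user_id is a fully-qualified @localpart:domain user id
            matched = self.users.get(user_id) == password
            return defer.succeed(matched)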
@@ -1,6 +1,8 @@ Using Postgres
--------------

Postgres version 9.4 or later is known to work.

Set up database
===============
@@ -112,9 +114,9 @@ script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::

    synapse_port_db --sqlite-database homeserver.db \
        --postgres-config homeserver-postgres.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
``homeserver.yaml``) and restart synapse. Synapse should now be running against
PostgreSQL.
@@ -26,28 +26,10 @@ expose the append-only log to the readers should be fairly minimal.
Architecture
------------

The Replication Protocol
~~~~~~~~~~~~~~~~~~~~~~~~

See ``tcp_replication.rst``


The Slaved DataStore
@@ -50,7 +50,7 @@ master_doc = 'index'

# General information about the project.
project = u'Synapse'
copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
223  docs/tcp_replication.rst  Normal file
@@ -0,0 +1,223 @@
TCP Replication
===============

Motivation
----------

Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.


Overview
--------

The protocol is based on fire and forget, line based commands. An example flow
would be (where '>' indicates master to worker and '<' worker to master flows)::

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.

Error reporting happens by either the client or server sending an ``ERROR``
command, and usually the connection will be closed.

Since the protocol is a simple line-based one, it's possible to manually connect
to the server using a tool like netcat. A few things should be noted when
manually using the protocol:

* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
  be used to get all future updates. The special stream name ``ALL`` can be used
  with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
  disabled on the main process.
* The server will only time connections out that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial PING on connection and ensure at least one command every
  5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token, however if the stream
  has multiple rows to replicate per token the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.


Architecture
------------

The basic structure of the protocol is line based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the ``RDATA`` command is defined as::

    RDATA <stream_name> <token> <row_json>

(Note that ``<row_json>`` may contain spaces, but cannot contain newlines.)

Blank lines are ignored.
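As an illustration of this line format, a minimal sketch of parsing a single
command line in Python (the sample RDATA row is invented for the example)::

    import json

    def parse_command_line(line):
        """Split a replication wire line into (command, rest-of-line)."""
        if not line.strip():
            return None  # blank lines are ignored
        command, _, rest = line.partition(" ")
        return command, rest

    # For an RDATA command the rest of the line is itself
    # "<stream_name> <token> <row_json>"; the row JSON may contain spaces,
    # so only split off the first two words before parsing it.
    cmd, rest = parse_command_line(
        'RDATA events 54 ["$foo1:bar.com", null, null, null, null]'
    )
    stream_name, token, row_json = rest.split(" ", 2)
    row = json.loads(row_json)
    print(cmd, stream_name, token, row)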
Keep alives
~~~~~~~~~~~

Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.

Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.

This ensures that both sides can quickly realize if the tcp connection has gone
and handle the situation appropriately.


Start up
~~~~~~~~

When a new connection is made, the server:

* Sends a ``SERVER`` command, which includes the identity of the server, allowing
  the client to detect if it's connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
  promptly.

The client:

* Sends a ``NAME`` command, allowing the server to associate a human friendly
  name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to it sends a ``REPLICATE``
  with the stream_name and token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
  expected server name.


Error handling
~~~~~~~~~~~~~~

If either side detects an error it can send an ``ERROR`` command and close the
connection.

If the client side loses the connection to the server it should reconnect,
following the steps above.


Congestion
~~~~~~~~~~

If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.


Reliability
~~~~~~~~~~~

In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.

The exception to that are the replication streams, i.e. RDATA commands, since
these include tokens which can be used to restart the stream on connection
errors.

The client should keep track of the token in the last RDATA command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending, these are *not* included on the wire::

    * connection established *
    > SERVER localhost:8823
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
    * connection closed by server *

The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.


An example of a batched set of ``RDATA`` is::

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it sees the
last ``RDATA``.


List of commands
~~~~~~~~~~~~~~~~

The list of valid commands, with which side can send it: server (S) or client (C):

SERVER (S)
    Sent at the start to identify which server the client is talking to

RDATA (S)
    A single update in a stream

POSITION (S)
    The position of the stream has been updated

ERROR (S, C)
    There was an error

PING (S, C)
    Sent periodically to ensure the connection is still alive

NAME (C)
    Sent at the start by client to inform the server who they are

REPLICATE (C)
    Asks the server to replicate a given stream

USER_SYNC (C)
    A user has started or stopped syncing

FEDERATION_ACK (C)
    Acknowledge receipt of some federation data

REMOVE_PUSHER (C)
    Inform the server a pusher should be removed

INVALIDATE_CACHE (C)
    Inform the server a cache should be invalidated

SYNC (S, C)
    Used exclusively in tests


See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
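Putting the reliability rules into practice, a client-side sketch of tracking
the last token seen on each stream (skipping ``batch`` tokens) so that a
reconnection can resume from the right place; the helper names here are
invented for illustration::

    # Remember the last numeric token seen on each stream, so that we
    # know where to REPLICATE from after a reconnection.
    last_tokens = {}

    def on_rdata(stream_name, token, row):
        if token != "batch":
            last_tokens[stream_name] = int(token)

    def commands_for_reconnect():
        # e.g. ["REPLICATE events 54", "REPLICATE caches 12"]
        return [
            "REPLICATE %s %d" % (stream, token)
            for stream, token in sorted(last_tokens.items())
        ]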
@@ -50,14 +50,37 @@ You may be able to setup coturn via your package manager, or set it up manually

    pwgen -s 64 1

5. Consider your security settings. TURN lets users request a relay
   which will connect to arbitrary IP addresses and ports. At the least
   we recommend::

       # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
       no-tcp-relay

       # don't let the relay ever try to connect to private IP address ranges within your network (if any)
       # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
       denied-peer-ip=10.0.0.0-10.255.255.255
       denied-peer-ip=192.168.0.0-192.168.255.255
       denied-peer-ip=172.16.0.0-172.31.255.255

       # special case the turn server itself so that client->TURN->TURN->client flows work
       allowed-peer-ip=10.0.0.1

       # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
       user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
       total-quota=1200

   Ideally coturn should refuse to relay traffic which isn't SRTP;
   see https://github.com/matrix-org/synapse/issues/2009

6. Ensure your firewall allows traffic into the TURN server on
   the ports you've configured it to listen on (remember to allow
   both TCP and UDP TURN traffic)

7. If you've configured coturn to support TLS/DTLS, generate or
   import your private key and certificate.

8. Start the turn server::

       bin/turnserver -o
@@ -83,12 +106,19 @@ Your home server configuration file needs the following extra keys:
    to refresh credentials. The TURN REST API specification recommends
    one day (86400000).

+4. "turn_allow_guests": Whether to allow guest users to use the TURN
+   server. This is enabled by default, as otherwise VoIP will not
+   work reliably for guests. However, it does introduce a security risk
+   as it lets guests connect to arbitrary endpoints without having gone
+   through a CAPTCHA or similar to register a real account.
+
 As an example, here is the relevant section of the config file for
 matrix.org::

    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
    turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
    turn_user_lifetime: 86400000
+   turn_allow_guests: True

 Now, restart synapse::
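For context on how these keys interact, here is a hedged sketch (not
Synapse's code) of the TURN REST API credential scheme that
``turn_shared_secret`` and ``turn_user_lifetime`` drive: the username is
"<expiry>:<user>" and the password is the base64 HMAC-SHA1 of that username
under the shared secret. The user ID and secret below are placeholders.

```python
import base64
import hashlib
import hmac
import time

def turn_credentials(user, shared_secret, lifetime_ms=86400000):
    # Credentials expire lifetime_ms after issue, matching turn_user_lifetime.
    expiry = int(time.time()) + lifetime_ms // 1000
    username = "%d:%s" % (expiry, user)
    mac = hmac.new(shared_secret.encode(), username.encode(), hashlib.sha1)
    return username, base64.b64encode(mac.digest()).decode()

print(turn_credentials("@alice:example.com", "some-shared-secret"))
```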
@@ -56,6 +56,7 @@ As a first cut, let's do #2 and have the receiver hit the API to calculate its o
 API
 ---

+```
 GET /_matrix/media/r0/preview_url?url=http://wherever.com
 200 OK
 {
@@ -66,6 +67,7 @@ GET /_matrix/media/r0/preview_url?url=http://wherever.com
   "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
   "og:site_name" : "Twitter"
 }
+```

 * Downloads the URL
 * If HTML, just stores it in RAM and parses it for OG meta tags
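A hedged example of exercising the endpoint sketched above (the homeserver
URL and access token are placeholders, and the response keys are only what
the example response suggests):

```python
import requests

resp = requests.get(
    "https://localhost:8448/_matrix/media/r0/preview_url",
    params={"url": "http://matrix.org"},
    headers={"Authorization": "Bearer YOUR_ACCESS_TOKEN"},
    verify=False,  # many test homeservers use self-signed certificates
)
print(resp.json().get("og:title"))
```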
docs/user_directory.md (new file, 17 lines)
@@ -0,0 +1,17 @@
User Directory API Implementation
=================================

The user directory is currently maintained based on the 'visible' users
on this particular server - i.e. ones which your account shares a room with, or
who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
quickest solution to fix it is:

```
UPDATE user_directory_stream_pos SET stream_id = NULL;
```

and restart synapse, which should then start a background task to
flush the current tables and regenerate the directory.
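A hedged sketch of applying that reset directly to a SQLite-backed homeserver
(the database path is an assumption; stop synapse before touching the file):

```python
import sqlite3

conn = sqlite3.connect("homeserver.db")
conn.execute("UPDATE user_directory_stream_pos SET stream_id = NULL")
conn.commit()
conn.close()
# Restart synapse afterwards to trigger the rebuild described above.
```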
docs/workers.rst (204 lines changed)
@@ -1,62 +1,90 @@
 Scaling synapse via workers
----------------------------
+===========================

 Synapse has experimental support for splitting out functionality into
 multiple separate python processes, helping greatly with scalability. These
 processes are called 'workers', and are (eventually) intended to scale
 horizontally independently.

+All of the below is highly experimental and subject to change as Synapse evolves,
+but documenting it here to help folks needing highly scalable Synapses similar
+to the one running matrix.org!
+
 All processes continue to share the same database instance, and as such, workers
 only work with postgres based synapse deployments (sharing a single sqlite
 across multiple processes is a recipe for disaster, plus you should be using
 postgres anyway if you care about scalability).

 The workers communicate with the master synapse process via a synapse-specific
-HTTP protocol called 'replication' - analogous to MySQL or Postgres style
+TCP protocol called 'replication' - analogous to MySQL or Postgres style
 database replication; feeding a stream of relevant data to the workers so they
 can be kept in sync with the main synapse process and database state.

-To enable workers, you need to add a replication listener to the master synapse, e.g.::
+Configuration
+-------------
+
+To make effective use of the workers, you will need to configure an HTTP
+reverse-proxy such as nginx or haproxy, which will direct incoming requests to
+the correct worker, or to the main synapse instance. Note that this includes
+requests made to the federation port. The caveats regarding running a
+reverse-proxy on the federation port still apply (see
+https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).
+
+To enable workers, you need to add two replication listeners to the master
+synapse, e.g.::

     listeners:
+      # The TCP replication port
       - port: 9092
+        bind_address: '127.0.0.1'
+        type: replication
+      # The HTTP replication port
+      - port: 9093
         bind_address: '127.0.0.1'
         type: http
-        tls: false
-        x_forwarded: false
         resources:
          - names: [replication]
-           compress: false

-Under **no circumstances** should this replication API listener be exposed to the
-public internet; it currently implements no authentication whatsoever and is
-unencrypted HTTP.
+Under **no circumstances** should these replication API listeners be exposed to
+the public internet; it currently implements no authentication whatsoever and is
+unencrypted.

-You then create a set of configs for the various worker processes. These should be
-worker configuration files should be stored in a dedicated subdirectory, to allow
-synctl to manipulate them.
+(Roughly, the TCP port is used for streaming data from the master to the
+workers, and the HTTP port for the workers to send data to the main
+synapse process.)

-The current available worker applications are:
- * synapse.app.pusher - handles sending push notifications to sygnal and email
- * synapse.app.synchrotron - handles /sync endpoints. can scales horizontally through multiple instances.
- * synapse.app.appservice - handles output traffic to Application Services
- * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
- * synapse.app.media_repository - handles the media repository.
+You then create a set of configs for the various worker processes. These
+should be worker configuration files, and should be stored in a dedicated
+subdirectory, to allow synctl to manipulate them. An additional configuration
+for the master synapse process will need to be created because the process will
+not be started automatically. That configuration should look like this::
+
+    worker_app: synapse.app.homeserver
+    daemonize: true

 Each worker configuration file inherits the configuration of the main homeserver
 configuration file. You can then override configuration specific to that worker,
 e.g. the HTTP listener that it provides (if any); logging configuration; etc.
 You should minimise the number of overrides though to maintain a usable config.

-You must specify the type of worker application (worker_app) and the replication
-endpoint that it's talking to on the main synapse process (worker_replication_url).
+You must specify the type of worker application (``worker_app``). The currently
+available worker applications are listed below. You must also specify the
+replication endpoints that it's talking to on the main synapse process.
+``worker_replication_host`` should specify the host of the main synapse,
+``worker_replication_port`` should point to the TCP replication listener port and
+``worker_replication_http_port`` should point to the HTTP replication port.
+
+Currently, only the ``event_creator`` worker requires specifying
+``worker_replication_http_port``.

 For instance::

     worker_app: synapse.app.synchrotron

     # The replication listener on the synapse to talk to.
-    worker_replication_url: http://127.0.0.1:9092/_synapse/replication
+    worker_replication_host: 127.0.0.1
+    worker_replication_port: 9092
+    worker_replication_http_port: 9093

     worker_listeners:
      - type: http
@@ -70,11 +98,11 @@ For instance::
     worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

 ...is a full configuration for a synchrotron worker instance, which will expose a
-plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
+plain HTTP ``/sync`` endpoint on port 8083 separately from the ``/sync`` endpoint provided
 by the main synapse.

-Obviously you should configure your loadbalancer to route the /sync endpoint to
-the synchrotron instance(s) in this instance.
+Obviously you should configure your reverse-proxy to route the relevant
+endpoints to the worker (``localhost:8083`` in the above example).

 Finally, to actually run your worker-based synapse, you must pass synctl the -a
 commandline option to tell it to operate on all the worker configurations found
@@ -91,7 +119,127 @@ To manipulate a specific worker, you pass the -w option to synctl::

     synctl -w $CONFIG/workers/synchrotron.yaml restart

-All of the above is highly experimental and subject to change as Synapse evolves,
-but documenting it here to help folks needing highly scalable Synapses similar
-to the one running matrix.org!
+
+Available worker applications
+-----------------------------
+
+``synapse.app.pusher``
+~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending push notifications to sygnal and email. Doesn't handle any
+REST endpoints itself, but you should set ``start_pushers: False`` in the
+shared configuration file to stop the main synapse sending these notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.synchrotron``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The synchrotron handles ``sync`` requests from clients. In particular, it can
+handle REST endpoints matching the following regular expressions::
+
+    ^/_matrix/client/(v2_alpha|r0)/sync$
+    ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
+    ^/_matrix/client/(api/v1|r0)/initialSync$
+    ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$
+
+The above endpoints should all be routed to the synchrotron worker by the
+reverse-proxy configuration.
+
+It is possible to run multiple instances of the synchrotron to scale
+horizontally. In this case the reverse-proxy should be configured to
+load-balance across the instances, though it will be more efficient if all
+requests from a particular user are routed to a single instance. Extracting
+a userid from the access token is currently left as an exercise for the reader.
+
+``synapse.app.appservice``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending output traffic to Application Services. Doesn't handle any
+REST endpoints itself, but you should set ``notify_appservices: False`` in the
+shared configuration file to stop the main synapse sending these notifications.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.federation_reader``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles a subset of federation endpoints. In particular, it can handle REST
+endpoints matching the following regular expressions::
+
+    ^/_matrix/federation/v1/event/
+    ^/_matrix/federation/v1/state/
+    ^/_matrix/federation/v1/state_ids/
+    ^/_matrix/federation/v1/backfill/
+    ^/_matrix/federation/v1/get_missing_events/
+    ^/_matrix/federation/v1/publicRooms
+
+The above endpoints should all be routed to the federation_reader worker by the
+reverse-proxy configuration.
+
+``synapse.app.federation_sender``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles sending federation traffic to other servers. Doesn't handle any
+REST endpoints itself, but you should set ``send_federation: False`` in the
+shared configuration file to stop the main synapse sending this traffic.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.media_repository``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles the media repository. It can handle all endpoints starting with::
+
+    /_matrix/media/
+
+You should also set ``enable_media_repo: False`` in the shared configuration
+file to stop the main synapse running background jobs related to managing the
+media repository.
+
+Note this worker cannot be load-balanced: only one instance should be active.
+
+``synapse.app.client_reader``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles client API endpoints. It can handle REST endpoints matching the
+following regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
+
+``synapse.app.user_dir``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles searches in the user directory. It can handle REST endpoints matching
+the following regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
+
+``synapse.app.frontend_proxy``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Proxies some frequently-requested client endpoints to add caching and remove
+load from the main synapse. It can handle REST endpoints matching the following
+regular expressions::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload
+
+It will proxy any requests it cannot handle to the main synapse instance. It
+must therefore be configured with the location of the main instance, via
+the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
+file. For example::
+
+    worker_main_http_uri: http://127.0.0.1:8008
+
+``synapse.app.event_creator``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handles some event creation. It can handle REST endpoints matching::
+
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
+    ^/_matrix/client/(api/v1|r0|unstable)/join/
+
+It will create events locally and then send them on to the main synapse
+instance to be persisted and handled.
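To make the routing rules above concrete, here is a hedged Python sketch
(not part of Synapse or of any real reverse-proxy) that checks a request
path against the synchrotron regexes listed above to decide which backend
should serve it; the port numbers follow the examples in this document:

```python
import re

SYNCHROTRON_PATHS = [
    r"^/_matrix/client/(v2_alpha|r0)/sync$",
    r"^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
    r"^/_matrix/client/(api/v1|r0)/initialSync$",
    r"^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
]

def route(path):
    # Anything matching a synchrotron pattern goes to the worker;
    # everything else falls through to the main synapse process.
    if any(re.match(p, path) for p in SYNCHROTRON_PATHS):
        return "synchrotron, localhost:8083"
    return "main synapse, localhost:8008"

print(route("/_matrix/client/r0/sync"))   # -> synchrotron, localhost:8083
print(route("/_matrix/client/r0/login"))  # -> main synapse, localhost:8008
```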
jenkins-dendron-haproxy-postgres.sh (new executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --haproxy \

@@ -15,8 +15,6 @@ export SYNAPSE_CACHE_FACTOR=1
 ./sytest/jenkins/prep_sytest_for_postgres.sh

 ./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
-    --pusher \
-    --synchrotron \
-    --federation-reader \

@@ -14,4 +14,5 @@ export SYNAPSE_CACHE_FACTOR=1
 ./sytest/jenkins/prep_sytest_for_postgres.sh

 ./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \

@@ -12,4 +12,5 @@ export SYNAPSE_CACHE_FACTOR=1
 ./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git

 ./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \

@@ -15,6 +15,6 @@ tox -e py27 --notest -v

 TOX_BIN=$TOX_DIR/py27/bin
 $TOX_BIN/pip install setuptools
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install lxml
-$TOX_BIN/pip install psycopg2
+{ python synapse/python_dependencies.py
+  echo lxml psycopg2
+} | xargs $TOX_BIN/pip install
@@ -18,7 +18,9 @@
         <div class="summarytext">{{ summary_text }}</div>
       </td>
       <td class="logo">
-        {% if app_name == "Vector" %}
+        {% if app_name == "Riot" %}
+          <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+        {% elif app_name == "Vector" %}
           <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
         {% else %}
           <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
scripts-dev/federation_client.py (125 lines changed; mode: Normal file → Executable file)
@@ -1,10 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
 import nacl.signing
 import json
 import base64
 import requests
 import sys
 import srvlookup
+import yaml

 def encode_base64(input_bytes):
     """Encode bytes as a base64 string without any padding."""

@@ -103,15 +123,25 @@ def lookup(destination, path):
     except:
         return "https://%s:%d%s" % (destination, 8448, path)

-def get_json(origin_name, origin_key, destination, path):
-    request_json = {
-        "method": "GET",
+
+def request_json(method, origin_name, origin_key, destination, path, content):
+    if method is None:
+        if content is None:
+            method = "GET"
+        else:
+            method = "POST"
+
+    json_to_sign = {
+        "method": method,
         "uri": path,
         "origin": origin_name,
         "destination": destination,
     }

-    signed_json = sign_json(request_json, origin_key, origin_name)
+    if content is not None:
+        json_to_sign["content"] = json.loads(content)
+
+    signed_json = sign_json(json_to_sign, origin_key, origin_name)

     authorization_headers = []

@@ -120,30 +150,97 @@ def get_json(origin_name, origin_key, destination, path):
             origin_name, key, sig,
         )
         authorization_headers.append(bytes(header))
-        sys.stderr.write(header)
-        sys.stderr.write("\n")
+        print ("Authorization: %s" % header, file=sys.stderr)

-    result = requests.get(
-        lookup(destination, path),
+    dest = lookup(destination, path)
+    print ("Requesting %s" % dest, file=sys.stderr)
+
+    result = requests.request(
+        method=method,
+        url=dest,
         headers={"Authorization": authorization_headers[0]},
         verify=False,
+        data=content,
     )
     sys.stderr.write("Status Code: %d\n" % (result.status_code,))
     return result.json()


 def main():
-    origin_name, keyfile, destination, path = sys.argv[1:]
+    parser = argparse.ArgumentParser(
+        description=
+            "Signs and sends a federation request to a matrix homeserver",
+    )

-    with open(keyfile) as f:
+    parser.add_argument(
+        "-N", "--server-name",
+        help="Name to give as the local homeserver. If unspecified, will be "
+             "read from the config file.",
+    )
+
+    parser.add_argument(
+        "-k", "--signing-key-path",
+        help="Path to the file containing the private ed25519 key to sign the "
+             "request with.",
+    )
+
+    parser.add_argument(
+        "-c", "--config",
+        default="homeserver.yaml",
+        help="Path to server config file. Ignored if --server-name and "
+             "--signing-key-path are both given.",
+    )
+
+    parser.add_argument(
+        "-d", "--destination",
+        default="matrix.org",
+        help="name of the remote homeserver. We will do SRV lookups and "
+             "connect appropriately.",
+    )
+
+    parser.add_argument(
+        "-X", "--method",
+        help="HTTP method to use for the request. Defaults to GET if --body is "
+             "unspecified, POST if it is."
+    )
+
+    parser.add_argument(
+        "--body",
+        help="Data to send as the body of the HTTP request"
+    )
+
+    parser.add_argument(
+        "path",
+        help="request path. We will add '/_matrix/federation/v1/' to this."
+    )
+
+    args = parser.parse_args()
+
+    if not args.server_name or not args.signing_key_path:
+        read_args_from_config(args)
+
+    with open(args.signing_key_path) as f:
         key = read_signing_keys(f)[0]

-    result = get_json(
-        origin_name, key, destination, "/_matrix/federation/v1/" + path
+    result = request_json(
+        args.method,
+        args.server_name, key, args.destination,
+        "/_matrix/federation/v1/" + args.path,
+        content=args.body,
     )

     json.dump(result, sys.stdout)
-    print ""
+    print ("")
+
+
+def read_args_from_config(args):
+    with open(args.config, 'r') as fh:
+        config = yaml.safe_load(fh)
+        if not args.server_name:
+            args.server_name = config['server_name']
+        if not args.signing_key_path:
+            args.signing_key_path = config['signing_key_path']


 if __name__ == "__main__":
     main()
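The Authorization header the script builds follows the federation
``X-Matrix`` scheme. Here is a hedged sketch of the header's shape only
(the server name, key ID and signature below are placeholders; the real
signature comes from signing the request JSON with the ed25519 key):

```python
def build_auth_header(origin, key_id, sig_b64):
    # X-Matrix authorization: who is asking, which key signed, and the signature.
    return 'X-Matrix origin=%s,key="%s",sig="%s"' % (origin, key_id, sig_b64)

print(build_auth_header("myserver.example.com", "ed25519:a_key_id", "c2lnbmF0dXJl"))
# -> X-Matrix origin=myserver.example.com,key="ed25519:a_key_id",sig="c2lnbmF0dXJl"
```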
@@ -9,16 +9,39 @@
 ROOMID="$1"

 sqlite3 homeserver.db <<EOF
-DELETE FROM context_depth WHERE context = '$ROOMID';
-DELETE FROM current_state WHERE context = '$ROOMID';
-DELETE FROM feedback WHERE room_id = '$ROOMID';
-DELETE FROM messages WHERE room_id = '$ROOMID';
-DELETE FROM pdu_backward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdu_edges WHERE context = '$ROOMID';
-DELETE FROM pdu_forward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdus WHERE context = '$ROOMID';
-DELETE FROM room_data WHERE room_id = '$ROOMID';
+DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_edges WHERE room_id = '$ROOMID';
+DELETE FROM room_depth WHERE room_id = '$ROOMID';
+DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM events WHERE room_id = '$ROOMID';
+DELETE FROM event_json WHERE room_id = '$ROOMID';
+DELETE FROM state_events WHERE room_id = '$ROOMID';
+DELETE FROM current_state_events WHERE room_id = '$ROOMID';
 DELETE FROM room_memberships WHERE room_id = '$ROOMID';
+DELETE FROM feedback WHERE room_id = '$ROOMID';
+DELETE FROM topics WHERE room_id = '$ROOMID';
+DELETE FROM room_names WHERE room_id = '$ROOMID';
 DELETE FROM rooms WHERE room_id = '$ROOMID';
-DELETE FROM state_pdus WHERE context = '$ROOMID';
+DELETE FROM room_hosts WHERE room_id = '$ROOMID';
+DELETE FROM room_aliases WHERE room_id = '$ROOMID';
+DELETE FROM state_groups WHERE room_id = '$ROOMID';
+DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
+DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
+DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
+DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
+DELETE FROM guest_access WHERE room_id = '$ROOMID';
+DELETE FROM history_visibility WHERE room_id = '$ROOMID';
+DELETE FROM room_tags WHERE room_id = '$ROOMID';
+DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
+DELETE FROM room_account_data WHERE room_id = '$ROOMID';
+DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
+DELETE FROM local_invites WHERE room_id = '$ROOMID';
+DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
+DELETE FROM event_reports WHERE room_id = '$ROOMID';
+DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
+DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
+DELETE FROM event_auth WHERE room_id = '$ROOMID';
+DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
+VACUUM;
 EOF
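Because the heredoc above interpolates ``$ROOMID`` straight into the SQL, a
room ID containing a quote would break it. A hedged alternative sketch using
bound parameters (table list abridged; the script above remains the
authoritative list):

```python
import sqlite3

TABLES = [
    "events", "event_json", "state_events", "current_state_events",
    "room_memberships", "rooms",
]

def delete_room(db_path, room_id):
    conn = sqlite3.connect(db_path)
    for table in TABLES:
        # Table names can't be bound, so they come from our fixed list;
        # the room ID is bound safely as a parameter.
        conn.execute("DELETE FROM %s WHERE room_id = ?" % table, (room_id,))
    conn.commit()
    conn.close()

delete_room("homeserver.db", "!somewhere:example.com")
```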
scripts/move_remote_media_to_new_store.py (new executable file, 133 lines)
@@ -0,0 +1,133 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Moves a list of remote media from one media store to another.

The input should be a list of media files to be moved, one per line. Each line
should be formatted::

    <origin server>|<file id>

This can be extracted from postgres with::

    psql --tuples-only -A -c "select media_origin, filesystem_id from
        matrix.remote_media_cache where ..."

To use, pipe the above into::

    PYTHONPATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""

from __future__ import print_function

import argparse
import logging
import os
import shutil
import sys

from synapse.rest.media.v1.filepath import MediaFilePaths

logger = logging.getLogger()


def main(src_repo, dest_repo):
    src_paths = MediaFilePaths(src_repo)
    dest_paths = MediaFilePaths(dest_repo)
    for line in sys.stdin:
        line = line.strip()
        parts = line.split('|')
        if len(parts) != 2:
            print("Unable to parse input line %s" % line, file=sys.stderr)
            exit(1)

        move_media(parts[0], parts[1], src_paths, dest_paths)


def move_media(origin_server, file_id, src_paths, dest_paths):
    """Move the given file, and any thumbnails, to the dest repo

    Args:
        origin_server (str):
        file_id (str):
        src_paths (MediaFilePaths):
        dest_paths (MediaFilePaths):
    """
    logger.info("%s/%s", origin_server, file_id)

    # check that the original exists
    original_file = src_paths.remote_media_filepath(origin_server, file_id)
    if not os.path.exists(original_file):
        logger.warn(
            "Original for %s/%s (%s) does not exist",
            origin_server, file_id, original_file,
        )
    else:
        mkdir_and_move(
            original_file,
            dest_paths.remote_media_filepath(origin_server, file_id),
        )

    # now look for thumbnails
    original_thumb_dir = src_paths.remote_media_thumbnail_dir(
        origin_server, file_id,
    )
    if not os.path.exists(original_thumb_dir):
        return

    mkdir_and_move(
        original_thumb_dir,
        dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
    )


def mkdir_and_move(original_file, dest_file):
    dirname = os.path.dirname(dest_file)
    if not os.path.exists(dirname):
        logger.debug("mkdir %s", dirname)
        os.makedirs(dirname)
    logger.debug("mv %s %s", original_file, dest_file)
    shutil.move(original_file, dest_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-v", action='store_true', help='enable debug logging')
    parser.add_argument(
        "src_repo",
        help="Path to source content repo",
    )
    parser.add_argument(
        "dest_repo",
        help="Path to destination content repo",
    )
    args = parser.parse_args()

    logging_config = {
        "level": logging.DEBUG if args.v else logging.INFO,
        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    }
    logging.basicConfig(**logging_config)

    main(args.src_repo, args.dest_repo)
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -39,6 +40,17 @@ BOOLEAN_COLUMNS = {
     "event_edges": ["is_state"],
     "presence_list": ["accepted"],
     "presence_stream": ["currently_active"],
+    "public_room_list_stream": ["visibility"],
+    "device_lists_outbound_pokes": ["sent"],
+    "users_who_share_rooms": ["share_private"],
+    "groups": ["is_public"],
+    "group_rooms": ["is_public"],
+    "group_users": ["is_public", "is_admin"],
+    "group_summary_rooms": ["is_public"],
+    "group_room_categories": ["is_public"],
+    "group_summary_users": ["is_public"],
+    "group_roles": ["is_public"],
+    "local_group_membership": ["is_publicised", "is_admin"],
 }

@@ -71,6 +83,14 @@ APPEND_ONLY_TABLES = [
     "event_to_state_groups",
     "rejections",
     "event_search",
+    "presence_stream",
+    "push_rules_stream",
+    "current_state_resets",
+    "ex_outlier_stream",
+    "cache_invalidation_stream",
+    "public_room_list_stream",
+    "state_group_edges",
+    "stream_ordering_to_exterm",
 ]

@@ -101,6 +121,7 @@ class Store(object):
     _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
     _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
+    _simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]

     def runInteraction(self, desc, func, *args, **kwargs):
         def r(conn):

@@ -111,7 +132,7 @@ class Store(object):
             try:
                 txn = conn.cursor()
                 return func(
-                    LoggingTransaction(txn, desc, self.database_engine, []),
+                    LoggingTransaction(txn, desc, self.database_engine, [], []),
                     *args, **kwargs
                 )
             except self.database_engine.module.DatabaseError as e:

@@ -230,6 +251,12 @@ class Porter(object):
     @defer.inlineCallbacks
     def handle_table(self, table, postgres_size, table_size, forward_chunk,
                      backward_chunk):
+        logger.info(
+            "Table %s: %i/%i (rows %i-%i) already ported",
+            table, postgres_size, table_size,
+            backward_chunk+1, forward_chunk-1,
+        )
+
         if not table_size:
             return

@@ -241,6 +268,25 @@ class Porter(object):
             )
             return

+        if table in (
+            "user_directory", "user_directory_search", "users_who_share_rooms",
+            "users_in_pubic_room",
+        ):
+            # We don't port these tables, as they're a faff and we can regenerate
+            # them anyway.
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
+        if table == "user_directory_stream_pos":
+            # We need to make sure there is a single row, `(X, null)`, as that is
+            # what synapse expects to be there.
+            yield self.postgres_store._simple_insert(
+                table=table,
+                values={"stream_id": None},
+            )
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
         forward_select = (
             "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
             % (table,)

@@ -288,7 +334,7 @@ class Porter(object):
                 backward_chunk = min(row[0] for row in brows) - 1

                 rows = frows + brows
-                self._convert_rows(table, headers, rows)
+                rows = self._convert_rows(table, headers, rows)

                 def insert(txn):
                     self.postgres_store.insert_many_txn(

@@ -346,10 +392,13 @@ class Porter(object):
                     " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
                 )

-                rows_dict = [
-                    dict(zip(headers, row))
-                    for row in rows
-                ]
+                rows_dict = []
+                for row in rows:
+                    d = dict(zip(headers, row))
+                    if "\0" in d['value']:
+                        logger.warn('dropping search row %s', d)
+                    else:
+                        rows_dict.append(d)

                 txn.executemany(sql, [
                     (

@@ -425,33 +474,10 @@ class Porter(object):
             self.progress.set_state("Preparing PostgreSQL")
             self.setup_db(postgres_config, postgres_engine)

-            # Step 2. Get tables.
-            self.progress.set_state("Fetching tables")
-            sqlite_tables = yield self.sqlite_store._simple_select_onecol(
-                table="sqlite_master",
-                keyvalues={
-                    "type": "table",
-                },
-                retcol="name",
-            )
-
-            postgres_tables = yield self.postgres_store._simple_select_onecol(
-                table="information_schema.tables",
-                keyvalues={
-                    "table_schema": "public",
-                },
-                retcol="distinct table_name",
-            )
-
-            tables = set(sqlite_tables) & set(postgres_tables)
-
-            self.progress.set_state("Creating tables")
-
-            logger.info("Found %d tables", len(tables))
+            self.progress.set_state("Creating port tables")

             def create_port_table(txn):
                 txn.execute(
-                    "CREATE TABLE port_from_sqlite3 ("
+                    "CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
                     " table_name varchar(100) NOT NULL UNIQUE,"
                     " forward_rowid bigint NOT NULL,"
                     " backward_rowid bigint NOT NULL"

@@ -477,18 +503,33 @@ class Porter(object):
                     "alter_table", alter_table
                 )
             except Exception as e:
-                logger.info("Failed to create port table: %s", e)
+                pass

-            try:
-                yield self.postgres_store.runInteraction(
-                    "create_port_table", create_port_table
-                )
-            except Exception as e:
-                logger.info("Failed to create port table: %s", e)
+            yield self.postgres_store.runInteraction(
+                "create_port_table", create_port_table
+            )

-            self.progress.set_state("Setting up")
+            # Step 2. Get tables.
+            self.progress.set_state("Fetching tables")
+            sqlite_tables = yield self.sqlite_store._simple_select_onecol(
+                table="sqlite_master",
+                keyvalues={
+                    "type": "table",
+                },
+                retcol="name",
+            )

-            # Set up tables.
+            postgres_tables = yield self.postgres_store._simple_select_onecol(
+                table="information_schema.tables",
+                keyvalues={},
+                retcol="distinct table_name",
+            )
+
+            tables = set(sqlite_tables) & set(postgres_tables)
+            logger.info("Found %d tables", len(tables))
+
+            # Step 3. Figure out what still needs copying
+            self.progress.set_state("Checking on port progress")
             setup_res = yield defer.gatherResults(
                 [
                     self.setup_table(table)

@@ -499,7 +540,8 @@ class Porter(object):
                 consumeErrors=True,
             )

-            # Process tables.
+            # Step 4. Do the copying.
+            self.progress.set_state("Copying to postgres")
             yield defer.gatherResults(
                 [
                     self.handle_table(*res)

@@ -508,6 +550,9 @@ class Porter(object):
                 consumeErrors=True,
             )

+            # Step 5. Do final post-processing
+            yield self._setup_state_group_id_seq()
+
             self.progress.done()
         except:
             global end_error_exec_info

@@ -523,17 +568,29 @@ class Porter(object):
             i for i, h in enumerate(headers) if h in bool_col_names
         ]

+        class BadValueException(Exception):
+            pass
+
         def conv(j, col):
             if j in bool_cols:
                 return bool(col)
+            elif isinstance(col, basestring) and "\0" in col:
+                logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
+                raise BadValueException()
             return col

+        outrows = []
         for i, row in enumerate(rows):
-            rows[i] = tuple(
-                conv(j, col)
-                for j, col in enumerate(row)
-                if j > 0
-            )
+            try:
+                outrows.append(tuple(
+                    conv(j, col)
+                    for j, col in enumerate(row)
+                    if j > 0
+                ))
+            except BadValueException:
+                pass
+
+        return outrows

     @defer.inlineCallbacks
     def _setup_sent_transactions(self):

@@ -561,7 +618,7 @@ class Porter(object):
             "select", r,
         )

-        self._convert_rows("sent_transactions", headers, rows)
+        rows = self._convert_rows("sent_transactions", headers, rows)

         inserted_rows = len(rows)
         if inserted_rows:

@@ -655,6 +712,16 @@ class Porter(object):

         defer.returnValue((done, remaining + done))

+    def _setup_state_group_id_seq(self):
+        def r(txn):
+            txn.execute("SELECT MAX(id) FROM state_groups")
+            next_id = txn.fetchone()[0]+1
+            txn.execute(
+                "ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
+                (next_id,),
+            )
+        return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
+

 ##############################################
 ###### The following is simply UI stuff ######
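The BOOLEAN_COLUMNS handling above exists because SQLite stores booleans as
0/1 integers while the Postgres schema expects real booleans. A toy sketch of
the per-row cast the port script performs (table and column names here are
illustrative, not the full list):

```python
# Hedged sketch of the boolean-column conversion, not the porter itself.
BOOLEAN_COLUMNS = {"events": ["processed", "outlier"]}

def convert_row(table, headers, row):
    bool_cols = {
        i for i, h in enumerate(headers)
        if h in BOOLEAN_COLUMNS.get(table, [])
    }
    # Cast flagged columns to bool; pass everything else through unchanged.
    return tuple(bool(col) if i in bool_cols else col for i, col in enumerate(row))

print(convert_row("events", ["event_id", "processed"], ("$abc", 1)))
# -> ('$abc', True)
```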
scripts/sync_room_to_group.pl (new executable file, 45 lines)
@@ -0,0 +1,45 @@
#!/usr/bin/env perl

use strict;
use warnings;

use JSON::XS;
use LWP::UserAgent;
use URI::Escape;

if (@ARGV < 4) {
    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
}

my ($hs, $access_token, $room_id, $group_id) = @ARGV;
my $ua = LWP::UserAgent->new();
$ua->timeout(10);

if ($room_id =~ /^#/) {
    $room_id = uri_escape($room_id);
    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
}

my $room_users  = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
my $group_users = [
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
];

die "refusing to sync from empty room" unless (@$room_users);
die "refusing to sync to empty group" unless (@$group_users);

my $diff = {};
foreach my $user (@$room_users) { $diff->{$user}++ }
foreach my $user (@$group_users) { $diff->{$user}-- }

foreach my $user (keys %$diff) {
    if ($diff->{$user} == 1) {
        warn "inviting $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
    elsif ($diff->{$user} == -1) {
        warn "removing $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
}
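The counting trick above (increment for room members, decrement for group
members) leaves +1 for users to invite and -1 for users to remove. The same
logic in a small Python sketch (user IDs are placeholders):

```python
room_users = {"@alice:example.com", "@bob:example.com"}
group_users = {"@bob:example.com", "@carol:example.com"}

diff = {}
for u in room_users:
    diff[u] = diff.get(u, 0) + 1
for u in group_users:
    diff[u] = diff.get(u, 0) - 1

to_invite = sorted(u for u, d in diff.items() if d == 1)
to_remove = sorted(u for u, d in diff.items() if d == -1)
print(to_invite)  # ['@alice:example.com'] -> invite to the group
print(to_remove)  # ['@carol:example.com'] -> remove from the group
```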
setup.py (73 lines changed)
@@ -23,6 +23,45 @@ import sys
 here = os.path.abspath(os.path.dirname(__file__))


+# Some notes on `setup.py test`:
+#
+# Once upon a time we used to try to make `setup.py test` run `tox` to run the
+# tests. That's a bad idea for three reasons:
+#
+# 1: `setup.py test` is supposed to find out whether the tests work in the
+#    *current* environment, not whatever tox sets up.
+# 2: Empirically, trying to install tox during the test run wasn't working ("No
+#    module named virtualenv").
+# 3: The tox documentation advises against it[1].
+#
+# Even further back in time, we used to use setuptools_trial [2]. That has its
+# own set of issues: for instance, it requires installation of Twisted to build
+# an sdist (because the recommended mode of usage is to add it to
+# `setup_requires`). That in turn means that in order to successfully run tox
+# you have to have the python header files installed for whichever version of
+# python tox uses (which is python3 on recent ubuntus, for example).
+#
+# So, for now at least, we stick with what appears to be the convention among
+# Twisted projects, and don't attempt to do anything when someone runs
+# `setup.py test`; instead we direct people to run `trial` directly if they
+# care.
+#
+# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
+# [2]: https://pypi.python.org/pypi/setuptools_trial
+class TestCommand(Command):
+    user_options = []
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        print ("""Synapse's tests cannot be run via setup.py. To run them, try:
+    PYTHONPATH="." trial tests
+""")
+
 def read_file(path_segments):
     """Read a file from the package. Takes a list of strings to join to
     make the path"""

@@ -39,38 +78,6 @@ def exec_file(path_segments):
     return result


-class Tox(Command):
-    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
-
-    def initialize_options(self):
-        self.tox_args = None
-
-    def finalize_options(self):
-        self.test_args = []
-        self.test_suite = True
-
-    def run(self):
-        # import here, cause outside the eggs aren't loaded
-        try:
-            import tox
-        except ImportError:
-            try:
-                self.distribution.fetch_build_eggs("tox")
-                import tox
-            except:
-                raise RuntimeError(
-                    "The tests need 'tox' to run. Please install 'tox'."
-                )
-        import shlex
-        args = self.tox_args
-        if args:
-            args = shlex.split(self.tox_args)
-        else:
-            args = []
-        errno = tox.cmdline(args=args)
-        sys.exit(errno)
-
-
 version = exec_file(("synapse", "__init__.py"))["__version__"]
 dependencies = exec_file(("synapse", "python_dependencies.py"))
 long_description = read_file(("README.rst",))

@@ -86,5 +93,5 @@ setup(
     zip_safe=False,
     long_description=long_description,
     scripts=["synctl"] + glob.glob("scripts/*"),
-    cmdclass={'test': Tox},
+    cmdclass={'test': TestCommand},
 )
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.17.1-rc1"
+__version__ = "0.27.2"
(File diff suppressed because it is too large)
synapse/api/constants.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -43,10 +44,8 @@ class JoinRules(object):

 class LoginType(object):
     PASSWORD = u"m.login.password"
-    OAUTH = u"m.login.oauth2"
-    EMAIL_CODE = u"m.login.email.code"
-    EMAIL_URL = u"m.login.email.url"
     EMAIL_IDENTITY = u"m.login.email.identity"
+    MSISDN = u"m.login.msisdn"
     RECAPTCHA = u"m.login.recaptcha"
     DUMMY = u"m.login.dummy"

@@ -85,3 +84,8 @@ class RoomCreationPreset(object):
     PRIVATE_CHAT = "private_chat"
     PUBLIC_CHAT = "public_chat"
     TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
+
+
+class ThirdPartyEntityKind(object):
+    USER = "user"
+    LOCATION = "location"
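
Note: the LoginType values are the wire identifiers advertised during User-Interactive Auth, so adding MSISDN and dropping the unused OAUTH/EMAIL_CODE/EMAIL_URL stages changes what a server may offer. A rough illustration of where these constants surface (the flow composition and session value are made up):

    from synapse.api.constants import LoginType

    # hypothetical 401 body advertising two auth flows to a registering client
    ui_auth_params = {
        "flows": [
            {"stages": [LoginType.RECAPTCHA, LoginType.EMAIL_IDENTITY]},
            {"stages": [LoginType.MSISDN]},
        ],
        "session": "abcdef",  # opaque session id, illustrative only
    }
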
synapse/api/errors.py
@@ -17,6 +17,8 @@

 import logging

+import simplejson as json
+
 logger = logging.getLogger(__name__)


@@ -39,37 +41,58 @@ class Codes(object):
     CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
     CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
     MISSING_PARAM = "M_MISSING_PARAM"
+    INVALID_PARAM = "M_INVALID_PARAM"
     TOO_LARGE = "M_TOO_LARGE"
     EXCLUSIVE = "M_EXCLUSIVE"
     THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
     THREEPID_IN_USE = "M_THREEPID_IN_USE"
     THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
+    THREEPID_DENIED = "M_THREEPID_DENIED"
     INVALID_USERNAME = "M_INVALID_USERNAME"
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


 class CodeMessageException(RuntimeError):
-    """An exception with integer code and message string attributes."""
+    """An exception with integer code and message string attributes.
+
+    Attributes:
+        code (int): HTTP error code
+        msg (str): string describing the error
+    """
     def __init__(self, code, msg):
         super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
         self.code = code
         self.msg = msg
-        self.response_code_message = None

     def error_dict(self):
         return cs_error(self.msg)


+class MatrixCodeMessageException(CodeMessageException):
+    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
+
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
+    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
+        super(MatrixCodeMessageException, self).__init__(code, msg)
+        self.errcode = errcode
+
+
 class SynapseError(CodeMessageException):
-    """A base error which can be caught for all synapse events."""
+    """A base exception type for matrix errors which have an errcode and error
+    message (as well as an HTTP status code).
+
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
     def __init__(self, code, msg, errcode=Codes.UNKNOWN):
         """Constructs a synapse error.

         Args:
             code (int): The integer error code (an HTTP response code)
             msg (str): The human-readable error message.
-            err (str): The error code e.g 'M_FORBIDDEN'
+            errcode (str): The matrix error code e.g 'M_FORBIDDEN'
         """
         super(SynapseError, self).__init__(code, msg)
         self.errcode = errcode
@@ -80,12 +103,87 @@ class SynapseError(CodeMessageException):
             self.errcode,
         )

+    @classmethod
+    def from_http_response_exception(cls, err):
+        """Make a SynapseError based on an HTTPResponseException
+
+        This is useful when a proxied request has failed, and we need to
+        decide how to map the failure onto a matrix error to send back to the
+        client.
+
+        An attempt is made to parse the body of the http response as a matrix
+        error. If that succeeds, the errcode and error message from the body
+        are used as the errcode and error message in the new synapse error.
+
+        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
+        set to the reason code from the HTTP response.
+
+        Args:
+            err (HttpResponseException):
+
+        Returns:
+            SynapseError:
+        """
+        # try to parse the body as json, to get better errcode/msg, but
+        # default to M_UNKNOWN with the HTTP status as the error text
+        try:
+            j = json.loads(err.response)
+        except ValueError:
+            j = {}
+        errcode = j.get('errcode', Codes.UNKNOWN)
+        errmsg = j.get('error', err.msg)
+
+        res = SynapseError(err.code, errmsg, errcode)
+        return res
+
+
 class RegistrationError(SynapseError):
     """An error raised when a registration event fails."""
     pass


+class FederationDeniedError(SynapseError):
+    """An error raised when the server tries to federate with a server which
+    is not on its federation whitelist.
+
+    Attributes:
+        destination (str): The destination which has been denied
+    """
+
+    def __init__(self, destination):
+        """Raised by federation client or server to indicate that we are
+        are deliberately not attempting to contact a given server because it is
+        not on our federation whitelist.
+
+        Args:
+            destination (str): the domain in question
+        """
+
+        self.destination = destination
+
+        super(FederationDeniedError, self).__init__(
+            code=403,
+            msg="Federation denied with %s." % (self.destination,),
+            errcode=Codes.FORBIDDEN,
+        )
+
+
+class InteractiveAuthIncompleteError(Exception):
+    """An error raised when UI auth is not yet complete
+
+    (This indicates we should return a 401 with 'result' as the body)
+
+    Attributes:
+        result (dict): the server response to the request, which should be
+            passed back to the client
+    """
+    def __init__(self, result):
+        super(InteractiveAuthIncompleteError, self).__init__(
+            "Interactive auth not yet complete",
+        )
+        self.result = result
+
+
 class UnrecognizedRequestError(SynapseError):
     """An error indicating we don't understand the request you're trying to make"""
     def __init__(self, *args, **kwargs):
@@ -105,13 +203,11 @@ class UnrecognizedRequestError(SynapseError):

 class NotFoundError(SynapseError):
     """An error indicating we can't find the thing you asked for"""
-    def __init__(self, *args, **kwargs):
-        if "errcode" not in kwargs:
-            kwargs["errcode"] = Codes.NOT_FOUND
+    def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND):
         super(NotFoundError, self).__init__(
             404,
-            "Not found",
-            **kwargs
+            msg,
+            errcode=errcode
         )


@@ -172,7 +268,6 @@ class LimitExceededError(SynapseError):
                  errcode=Codes.LIMIT_EXCEEDED):
         super(LimitExceededError, self).__init__(code, msg, errcode)
         self.retry_after_ms = retry_after_ms
-        self.response_code_message = "Too Many Requests"

     def error_dict(self):
         return cs_error(
@@ -242,6 +337,19 @@ class FederationError(RuntimeError):


 class HttpResponseException(CodeMessageException):
+    """
+    Represents an HTTP-level failure of an outbound request
+
+    Attributes:
+        response (str): body of response
+    """
     def __init__(self, code, msg, response):
-        self.response = response
+        """
+
+        Args:
+            code (int): HTTP status code
+            msg (str): reason phrase from HTTP response status line
+            response (str): body of response
+        """
         super(HttpResponseException, self).__init__(code, msg)
+        self.response = response
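
Note: `SynapseError.from_http_response_exception` is the glue for the proxying model used by the workers later in this range: when a request forwarded to another process fails, the remote body is re-parsed so the original errcode survives. A hedged usage sketch (`proxy_get` and its client are hypothetical; only the exception types come from the diff above):

    from twisted.internet import defer

    from synapse.api.errors import HttpResponseException, SynapseError

    @defer.inlineCallbacks
    def proxy_get(http_client, url):
        try:
            result = yield http_client.get_json(url)
        except HttpResponseException as e:
            # surfaces the remote errcode/error if the body parses as a
            # matrix error, else M_UNKNOWN with the HTTP reason phrase
            raise SynapseError.from_http_response_exception(e)
        defer.returnValue(result)
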
synapse/api/filtering.py
@@ -13,11 +13,174 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.api.errors import SynapseError
+from synapse.storage.presence import UserPresenceState
 from synapse.types import UserID, RoomID

 from twisted.internet import defer

-import ujson as json
+import simplejson as json
+import jsonschema
+from jsonschema import FormatChecker
+
+FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "limit": {
+            "type": "number"
+        },
+        "senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "not_senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        # TODO: We don't limit event type values but we probably should...
+        # check types are valid event types
+        "types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "not_types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        }
+    }
+}
+
+ROOM_FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "not_rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "ephemeral": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "include_leave": {
+            "type": "boolean"
+        },
+        "state": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "timeline": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "account_data": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+    }
+}
+
+ROOM_EVENT_FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "limit": {
+            "type": "number"
+        },
+        "senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "not_senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "not_types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "not_rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "contains_url": {
+            "type": "boolean"
+        }
+    }
+}
+
+USER_ID_ARRAY_SCHEMA = {
+    "type": "array",
+    "items": {
+        "type": "string",
+        "format": "matrix_user_id"
+    }
+}
+
+ROOM_ID_ARRAY_SCHEMA = {
+    "type": "array",
+    "items": {
+        "type": "string",
+        "format": "matrix_room_id"
+    }
+}
+
+USER_FILTER_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "description": "schema for a Sync filter",
+    "type": "object",
+    "definitions": {
+        "room_id_array": ROOM_ID_ARRAY_SCHEMA,
+        "user_id_array": USER_ID_ARRAY_SCHEMA,
+        "filter": FILTER_SCHEMA,
+        "room_filter": ROOM_FILTER_SCHEMA,
+        "room_event_filter": ROOM_EVENT_FILTER_SCHEMA
+    },
+    "properties": {
+        "presence": {
+            "$ref": "#/definitions/filter"
+        },
+        "account_data": {
+            "$ref": "#/definitions/filter"
+        },
+        "room": {
+            "$ref": "#/definitions/room_filter"
+        },
+        "event_format": {
+            "type": "string",
+            "enum": ["client", "federation"]
+        },
+        "event_fields": {
+            "type": "array",
+            "items": {
+                "type": "string",
+                # Don't allow '\\' in event field filters. This makes matching
+                # events a lot easier as we can then use a negative lookbehind
+                # assertion to split '\.' If we allowed \\ then it would
+                # incorrectly split '\\.' See synapse.events.utils.serialize_event
+                "pattern": "^((?!\\\).)*$"
+            }
+        }
+    },
+    "additionalProperties": False
+}
+
+
+@FormatChecker.cls_checks('matrix_room_id')
+def matrix_room_id_validator(room_id_str):
+    return RoomID.from_string(room_id_str)
+
+
+@FormatChecker.cls_checks('matrix_user_id')
+def matrix_user_id_validator(user_id_str):
+    return UserID.from_string(user_id_str)


 class Filtering(object):
@@ -52,83 +215,11 @@ class Filtering(object):
         # NB: Filters are the complete json blobs. "Definitions" are an
         # individual top-level key e.g. public_user_data. Filters are made of
         # many definitions.
-        top_level_definitions = [
-            "presence", "account_data"
-        ]
-
-        room_level_definitions = [
-            "state", "timeline", "ephemeral", "account_data"
-        ]
-
-        for key in top_level_definitions:
-            if key in user_filter_json:
-                self._check_definition(user_filter_json[key])
-
-        if "room" in user_filter_json:
-            self._check_definition_room_lists(user_filter_json["room"])
-            for key in room_level_definitions:
-                if key in user_filter_json["room"]:
-                    self._check_definition(user_filter_json["room"][key])
-
-    def _check_definition_room_lists(self, definition):
-        """Check that "rooms" and "not_rooms" are lists of room ids if they
-        are present
-
-        Args:
-            definition(dict): The filter definition
-        Raises:
-            SynapseError: If there was a problem with this definition.
-        """
-        # check rooms are valid room IDs
-        room_id_keys = ["rooms", "not_rooms"]
-        for key in room_id_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for room_id in definition[key]:
-                    RoomID.from_string(room_id)
-
-    def _check_definition(self, definition):
-        """Check if the provided definition is valid.
-
-        This inspects not only the types but also the values to make sure they
-        make sense.
-
-        Args:
-            definition(dict): The filter definition
-        Raises:
-            SynapseError: If there was a problem with this definition.
-        """
-        # NB: Filters are the complete json blobs. "Definitions" are an
-        # individual top-level key e.g. public_user_data. Filters are made of
-        # many definitions.
-        if type(definition) != dict:
-            raise SynapseError(
-                400, "Expected JSON object, not %s" % (definition,)
-            )
-
-        self._check_definition_room_lists(definition)
-
-        # check senders are valid user IDs
-        user_id_keys = ["senders", "not_senders"]
-        for key in user_id_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for user_id in definition[key]:
-                    UserID.from_string(user_id)
-
-        # TODO: We don't limit event type values but we probably should...
-        # check types are valid event types
-        event_keys = ["types", "not_types"]
-        for key in event_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for event_type in definition[key]:
-                    if not isinstance(event_type, basestring):
-                        raise SynapseError(400, "Event type should be a string")
+        try:
+            jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
+                                format_checker=FormatChecker())
+        except jsonschema.ValidationError as e:
+            raise SynapseError(400, e.message)


 class FilterCollection(object):
@@ -152,6 +243,7 @@ class FilterCollection(object):
         self.include_leave = filter_json.get("room", {}).get(
             "include_leave", False
         )
+        self.event_fields = filter_json.get("event_fields", [])

     def __repr__(self):
         return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
@@ -186,6 +278,26 @@ class FilterCollection(object):
     def filter_room_account_data(self, events):
         return self._room_account_data.filter(self._room_filter.filter(events))

+    def blocks_all_presence(self):
+        return (
+            self._presence_filter.filters_all_types() or
+            self._presence_filter.filters_all_senders()
+        )
+
+    def blocks_all_room_ephemeral(self):
+        return (
+            self._room_ephemeral_filter.filters_all_types() or
+            self._room_ephemeral_filter.filters_all_senders() or
+            self._room_ephemeral_filter.filters_all_rooms()
+        )
+
+    def blocks_all_room_timeline(self):
+        return (
+            self._room_timeline_filter.filters_all_types() or
+            self._room_timeline_filter.filters_all_senders() or
+            self._room_timeline_filter.filters_all_rooms()
+        )
+

 class Filter(object):
     def __init__(self, filter_json):
@@ -202,25 +314,50 @@ class Filter(object):

         self.contains_url = self.filter_json.get("contains_url", None)

+    def filters_all_types(self):
+        return "*" in self.not_types
+
+    def filters_all_senders(self):
+        return "*" in self.not_senders
+
+    def filters_all_rooms(self):
+        return "*" in self.not_rooms
+
     def check(self, event):
         """Checks whether the filter matches the given event.

         Returns:
             bool: True if the event matches
         """
-        sender = event.get("sender", None)
-        if not sender:
-            # Presence events have their 'sender' in content.user_id
-            content = event.get("content")
-            # account_data has been allowed to have non-dict content, so check type first
-            if isinstance(content, dict):
-                sender = content.get("user_id")
+        # We usually get the full "events" as dictionaries coming through,
+        # except for presence which actually gets passed around as its own
+        # namedtuple type.
+        if isinstance(event, UserPresenceState):
+            sender = event.user_id
+            room_id = None
+            ev_type = "m.presence"
+            is_url = False
+        else:
+            sender = event.get("sender", None)
+            if not sender:
+                # Presence events had their 'sender' in content.user_id, but are
+                # now handled above. We don't know if anything else uses this
+                # form. TODO: Check this and probably remove it.
+                content = event.get("content")
+                # account_data has been allowed to have non-dict content, so
+                # check type first
+                if isinstance(content, dict):
+                    sender = content.get("user_id")
+
+            room_id = event.get("room_id", None)
+            ev_type = event.get("type", None)
+            is_url = "url" in event.get("content", {})
+
         return self.check_fields(
-            event.get("room_id", None),
+            room_id,
             sender,
-            event.get("type", None),
-            "url" in event.get("content", {})
+            ev_type,
+            is_url,
         )

     def check_fields(self, room_id, sender, event_type, contains_url):
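
Note: the validation rewrite above leans on two jsonschema features: `$ref`/`definitions` for shared sub-schemas, and `FormatChecker.cls_checks` to register custom string formats, which only fire when a FormatChecker instance is passed to `validate`. A self-contained sketch of that mechanism (the `even_number` format is invented for illustration):

    import jsonschema
    from jsonschema import FormatChecker

    @FormatChecker.cls_checks("even_number")
    def check_even(value):
        # format checkers return a bool; non-integers pass through unchecked here
        return not isinstance(value, int) or value % 2 == 0

    SCHEMA = {
        "type": "object",
        "properties": {
            "count": {"type": "integer", "format": "even_number"},
        },
    }

    try:
        # omit format_checker and the "format" keyword is silently ignored,
        # which is why check_valid_filter passes format_checker=FormatChecker()
        jsonschema.validate({"count": 3}, SCHEMA, format_checker=FormatChecker())
    except jsonschema.ValidationError as e:
        print("rejected:", e.message)
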
synapse/api/ratelimiter.py
@@ -23,7 +23,7 @@ class Ratelimiter(object):
     def __init__(self):
         self.message_counts = collections.OrderedDict()

-    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
+    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
         """Can the user send a message?
         Args:
             user_id: The user sending a message.
@@ -32,12 +32,15 @@ class Ratelimiter(object):
                 second.
             burst_count: How many messages the user can send before being
                 limited.
+            update (bool): Whether to update the message rates or not. This is
+                useful to check if a message would be allowed to be sent before
+                its ready to be actually sent.
         Returns:
             A pair of a bool indicating if they can send a message now and a
                 time in seconds of when they can next send a message.
         """
         self.prune_message_counts(time_now_s)
-        message_count, time_start, _ignored = self.message_counts.pop(
+        message_count, time_start, _ignored = self.message_counts.get(
             user_id, (0., time_now_s, None),
         )
         time_delta = time_now_s - time_start
@@ -52,9 +55,10 @@ class Ratelimiter(object):
             allowed = True
             message_count += 1

-        self.message_counts[user_id] = (
-            message_count, time_start, msg_rate_hz
-        )
+        if update:
+            self.message_counts[user_id] = (
+                message_count, time_start, msg_rate_hz
+            )

         if msg_rate_hz > 0:
             time_allowed = (
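
Note: the new `update` flag turns `send_message` into a two-phase check: a caller can ask whether a message would be allowed without charging it against the budget, then commit once the send actually happens. A hedged sketch of that calling pattern (`try_send` is hypothetical):

    import time

    limiter = Ratelimiter()

    def try_send(user_id, send_fn, rate_hz=0.2, burst=10.0):
        now = time.time()
        allowed, retry_at = limiter.send_message(
            user_id, now, rate_hz, burst, update=False,  # dry run only
        )
        if not allowed:
            return retry_at  # time at which the caller may retry
        send_fn()
        # the message really went out, so record it this time
        limiter.send_message(user_id, now, rate_hz, burst)
        return None
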
synapse/api/urls.py
@@ -25,4 +25,3 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
-APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
synapse/app/_base.py (new file, 178 lines)
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import logging
+import sys
+
+try:
+    import affinity
+except Exception:
+    affinity = None
+
+from daemonize import Daemonize
+from synapse.util import PreserveLoggingContext
+from synapse.util.rlimit import change_resource_limit
+from twisted.internet import error, reactor
+
+logger = logging.getLogger(__name__)
+
+
+def start_worker_reactor(appname, config):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor. Pulls configuration from the 'worker' settings in 'config'.
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        config (synapse.config.Config): config object
+    """
+
+    logger = logging.getLogger(config.worker_app)
+
+    start_reactor(
+        appname,
+        config.soft_file_limit,
+        config.gc_thresholds,
+        config.worker_pid_file,
+        config.worker_daemonize,
+        config.worker_cpu_affinity,
+        logger,
+    )
+
+
+def start_reactor(
+        appname,
+        soft_file_limit,
+        gc_thresholds,
+        pid_file,
+        daemonize,
+        cpu_affinity,
+        logger,
+):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        soft_file_limit (int):
+        gc_thresholds:
+        pid_file (str): name of pid file to write to if daemonize is True
+        daemonize (bool): true to run the reactor in a background process
+        cpu_affinity (int|None): cpu affinity mask
+        logger (logging.Logger): logger instance to pass to Daemonize
+    """
+
+    def run():
+        # make sure that we run the reactor with the sentinel log context,
+        # otherwise other PreserveLoggingContext instances will get confused
+        # and complain when they see the logcontext arbitrarily swapping
+        # between the sentinel and `run` logcontexts.
+        with PreserveLoggingContext():
+            logger.info("Running")
+            if cpu_affinity is not None:
+                if not affinity:
+                    quit_with_error(
+                        "Missing package 'affinity' required for cpu_affinity\n"
+                        "option\n\n"
+                        "Install by running:\n\n"
+                        "    pip install affinity\n\n"
+                    )
+                logger.info("Setting CPU affinity to %s" % cpu_affinity)
+                affinity.set_process_affinity_mask(0, cpu_affinity)
+            change_resource_limit(soft_file_limit)
+            if gc_thresholds:
+                gc.set_threshold(*gc_thresholds)
+            reactor.run()
+
+    if daemonize:
+        daemon = Daemonize(
+            app=appname,
+            pid=pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+def quit_with_error(error_string):
+    message_lines = error_string.split("\n")
+    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
+    sys.stderr.write("*" * line_length + '\n')
+    for line in message_lines:
+        sys.stderr.write(" %s\n" % (line.rstrip(),))
+    sys.stderr.write("*" * line_length + '\n')
+    sys.exit(1)
+
+
+def listen_tcp(bind_addresses, port, factory, backlog=50):
+    """
+    Create a TCP socket for a port and several addresses
+    """
+    for address in bind_addresses:
+        try:
+            reactor.listenTCP(
+                port,
+                factory,
+                backlog,
+                address
+            )
+        except error.CannotListenError as e:
+            check_bind_error(e, address, bind_addresses)
+
+
+def listen_ssl(bind_addresses, port, factory, context_factory, backlog=50):
+    """
+    Create an SSL socket for a port and several addresses
+    """
+    for address in bind_addresses:
+        try:
+            reactor.listenSSL(
+                port,
+                factory,
+                context_factory,
+                backlog,
+                address
+            )
+        except error.CannotListenError as e:
+            check_bind_error(e, address, bind_addresses)
+
+
+def check_bind_error(e, address, bind_addresses):
+    """
+    This method checks an exception occurred while binding on 0.0.0.0.
+    If :: is specified in the bind addresses a warning is shown.
+    The exception is still raised otherwise.
+
+    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
+    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
+    When binding on 0.0.0.0 after :: this can safely be ignored.
+
+    Args:
+        e (Exception): Exception that was caught.
+        address (str): Address on which binding was attempted.
+        bind_addresses (list): Addresses on which the service listens.
+    """
+    if address == '0.0.0.0' and '::' in bind_addresses:
+        logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
+    else:
+        raise e
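
Note: `listen_tcp`/`check_bind_error` exist because of the dual-stack wrinkle the docstring describes: once `::` is bound, a second bind on `0.0.0.0` for the same port fails with CannotListenError on Linux and macOS, and that one failure is safe to ignore. A toy reproduction of the pattern (port and factory are arbitrary):

    from twisted.internet import error, protocol, reactor

    factory = protocol.Factory()

    for address in ["::", "0.0.0.0"]:
        try:
            # same positional (port, factory, backlog, interface) order as above
            reactor.listenTCP(8448, factory, 50, address)
        except error.CannotListenError as e:
            # warns and continues only for 0.0.0.0-after-::, re-raises otherwise
            check_bind_error(e, address, ["::", "0.0.0.0"])
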
synapse/app/appservice.py
@@ -13,35 +13,30 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
-from synapse.server import HomeServer
+from synapse import events
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext
+from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.internet import reactor
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.appservice")

@@ -54,19 +49,6 @@ class AppserviceSlaveStore(


 class AppserviceServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
@@ -74,7 +56,7 @@ class AppserviceServer(HomeServer):

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -82,17 +64,19 @@ class AppserviceServer(HomeServer):
                 if name == "metrics":
                     resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
-            ),
-            interface=bind_address
+            )
         )
+
         logger.info("Synapse appservice now listening on port %d", port)

     def start_listening(self, listeners):
@@ -100,42 +84,37 @@ class AppserviceServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                reactor.listenTCP(
+                _base.listen_tcp(
+                    listener["bind_addresses"],
                     listener["port"],
                     manhole(
                         username="matrix",
                         password="rabbithole",
                         globals={"hs": self},
-                    ),
-                    interface=listener.get("bind_address", '127.0.0.1')
+                    )
                 )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        appservice_handler = self.get_application_service_handler()
-
-        @defer.inlineCallbacks
-        def replicate(results):
-            stream = results.get("events")
-            if stream:
-                max_stream_id = stream["position"]
-                yield appservice_handler.notify_interested_services(max_stream_id)
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                replicate(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(30)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ASReplicationHandler(self)
+
+
+class ASReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(ASReplicationHandler, self).__init__(hs.get_datastore())
+        self.appservice_handler = hs.get_application_service_handler()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
+
+        if stream_name == "events":
+            max_stream_id = self.store.get_room_max_stream_ordering()
+            preserve_fn(
+                self.appservice_handler.notify_interested_services
+            )(max_stream_id)


 def start(config_options):
@@ -149,7 +128,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.appservice"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts

     database_engine = create_engine(config.database_config)

@@ -176,32 +157,13 @@ def start(config_options):
     ps.setup()
     ps.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
-        ps.replicate()
         ps.get_datastore().start_profiling()
+        ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-appservice",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-appservice", config)


 if __name__ == '__main__':
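
Note: the appservice worker's switch from HTTP long-polling to the TCP replication protocol sets the pattern the other workers follow: subclass ReplicationClientHandler, let the base class track stream positions, and hook `on_rdata` for the streams you care about. Reduced to a sketch (the logging body is illustrative):

    class LoggingReplicationHandler(ReplicationClientHandler):
        """Sketch: observe the events stream on a worker."""

        def on_rdata(self, stream_name, token, rows):
            # always defer to the base class first so the stored stream
            # positions advance before we act on the new rows
            super(LoggingReplicationHandler, self).on_rdata(
                stream_name, token, rows,
            )
            if stream_name == "events":
                logger.info("events stream at %s (%d rows)", token, len(rows))
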
synapse/app/client_reader.py (new file, 172 lines)
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import sys
+
+import synapse
+from synapse import events
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.http.server import JsonResource
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.room import PublicRoomListRestServlet
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
+
+logger = logging.getLogger("synapse.app.client_reader")
+
+
+class ClientReaderSlavedStore(
+    SlavedEventStore,
+    SlavedKeyStore,
+    RoomStore,
+    DirectoryStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    TransactionStore,
+    SlavedClientIpStore,
+    BaseSlavedStore,
+):
+    pass
+
+
+class ClientReaderServer(HomeServer):
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    PublicRoomListRestServlet(self).register(resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+            )
+        )
+
+        logger.info("Synapse client reader now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse client reader", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.client_reader"
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ss = ClientReaderServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ss.setup()
+    ss.start_listening(config.worker_listeners)
+
+    def start():
+        ss.get_state_handler().start_caching()
+        ss.get_datastore().start_profiling()
+
+    reactor.callWhenRunning(start)
+
+    _base.start_worker_reactor("synapse-client-reader", config)
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
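
Note: `_listen_http` shows how a read-only worker exposes a slice of the client API: one JsonResource, an explicit `register()` call per servlet it is willing to serve, and that resource mounted under every client prefix so all API versions route to it. The core step, sketched with the one servlet this worker registers (`hs` is assumed to be a HomeServer):

    resource = JsonResource(hs, canonical_json=False)

    # only registered servlets exist on this worker; everything else 404s
    PublicRoomListRestServlet(hs).register(resource)

    resources = {
        prefix: resource
        for prefix in (
            "/_matrix/client/r0",
            "/_matrix/client/unstable",
            "/_matrix/client/v2_alpha",
            "/_matrix/client/api/v1",
        )
    }
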
189
synapse/app/event_creator.py
Normal file
189
synapse/app/event_creator.py
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2018 New Vector Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import logging
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import synapse
|
||||||
|
from synapse import events
|
||||||
|
from synapse.app import _base
|
||||||
|
from synapse.config._base import ConfigError
|
||||||
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
|
from synapse.config.logger import setup_logging
|
||||||
|
from synapse.crypto import context_factory
|
||||||
|
from synapse.http.server import JsonResource
|
||||||
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
|
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||||
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
|
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||||
|
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||||
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
|
from synapse.replication.slave.storage.profile import SlavedProfileStore
|
||||||
|
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||||
|
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||||
|
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||||
|
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||||
|
from synapse.replication.slave.storage.room import RoomStore
|
||||||
|
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||||
|
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||||
|
from synapse.rest.client.v1.room import (
|
||||||
|
RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
|
||||||
|
JoinRoomAliasServlet,
|
||||||
|
)
|
||||||
|
from synapse.server import HomeServer
|
||||||
|
from synapse.storage.engines import create_engine
|
||||||
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext
|
||||||
|
from synapse.util.manhole import manhole
|
||||||
|
from synapse.util.versionstring import get_version_string
|
||||||
|
from twisted.internet import reactor
|
||||||
|
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.event_creator")


class EventCreatorSlavedStore(
    DirectoryStore,
    TransactionStore,
    SlavedProfileStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
    SlavedReceiptsStore,
    SlavedPushRuleStore,
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedEventStore,
    SlavedRegistrationStore,
    RoomStore,
    BaseSlavedStore,
):
    pass


class EventCreatorServer(HomeServer):
    def setup(self):
        logger.info("Setting up.")
        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    RoomSendEventRestServlet(self).register(resource)
                    RoomMembershipRestServlet(self).register(resource)
                    RoomStateEventRestServlet(self).register(resource)
                    JoinRoomAliasServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            )
        )

        logger.info("Synapse event creator now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse event creator", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.event_creator"

    assert config.worker_replication_http_port is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = EventCreatorServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-event-creator", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
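Throughout this branch, direct reactor.listenTCP(..., interface=bind_address) calls are replaced by _base.listen_tcp(bind_addresses, port, factory), which binds the same site to every configured address. A minimal sketch of what such a helper presumably looks like, assuming a running Twisted reactor; the real synapse.app._base implementation may differ in signature and error handling:

# Hedged sketch of a multi-address listen helper in the spirit of
# synapse.app._base.listen_tcp; not the upstream implementation.
from twisted.internet import reactor


def listen_tcp(bind_addresses, port, factory, backlog=50):
    """Start a TCP listener for `factory` on each address in `bind_addresses`."""
    listeners = []
    for address in bind_addresses:
        # One listening socket per configured address, all sharing one factory.
        listeners.append(
            reactor.listenTCP(port, factory, backlog, interface=address)
        )
    return listeners

Returning the listener objects also makes it possible to stop them cleanly at shutdown, as the replication listener in homeserver.py does further down.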
synapse/app/federation_reader.py
@@ -13,41 +13,35 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
+from synapse.api.urls import FEDERATION_PREFIX
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import TransactionStore
-from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.api.urls import FEDERATION_PREFIX
-from synapse.federation.transport.server import TransportLayerServer
-from synapse.crypto import context_factory
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
-
+from twisted.internet import reactor
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.federation_reader")

@@ -64,19 +58,6 @@ class FederationReaderSlavedStore(


 class FederationReaderServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)

@@ -84,7 +65,7 @@ class FederationReaderServer(HomeServer):

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:

@@ -96,17 +77,19 @@ class FederationReaderServer(HomeServer):
                 FEDERATION_PREFIX: TransportLayerServer(self),
             })

-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
-            ),
-            interface=bind_address
+            )
         )

         logger.info("Synapse federation reader now listening on port %d", port)

     def start_listening(self, listeners):

@@ -114,33 +97,22 @@ class FederationReaderServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
-                reactor.listenTCP(
+                _base.listen_tcp(
+                    listener["bind_addresses"],
                     listener["port"],
                     manhole(
                         username="matrix",
                         password="rabbithole",
                         globals={"hs": self},
-                    ),
-                    interface=listener.get("bind_address", '127.0.0.1')
+                    )
                 )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())


 def start(config_options):

@@ -154,7 +126,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.federation_reader"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts

     database_engine = create_engine(config.database_config)

@@ -170,35 +144,15 @@ def start(config_options):
     )

     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
+        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
-        ss.replicate()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-federation-reader",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-federation-reader", config)


 if __name__ == '__main__':
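The get_db_conn method deleted above (presumably centralised in the shared HomeServer base, since the identical body is also dropped from homeserver.py further down) filtered the database arguments before opening a connection: any key starting with cp_ configures Twisted's adbapi connection pool and must not reach the driver itself. A standalone sketch of that filtering, using sqlite3 and an invented db_config purely for illustration:

# Illustration of the cp_-prefix filtering from the removed get_db_conn;
# db_config and the sqlite3 driver are stand-ins, not synapse's own objects.
import sqlite3

db_config = {"args": {"database": ":memory:", "cp_min": 5, "cp_max": 10}}

db_params = {
    k: v for k, v in db_config.get("args", {}).items()
    if not k.startswith("cp_")  # cp_* belongs to the adbapi pool, not the driver
}
db_conn = sqlite3.connect(**db_params)
db_conn.close()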
synapse/app/federation_sender.py (new file, 257 lines)
@@ -0,0 +1,257 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.federation_sender")


class FederationSenderSlaveStore(
    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
):
    def __init__(self, db_conn, hs):
        super(FederationSenderSlaveStore, self).__init__(db_conn, hs)

        # We pull out the current federation stream position now so that we
        # always have a known value for the federation position in memory so
        # that we don't have to bounce via a deferred once when we start the
        # replication streams.
        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

    def _get_federation_out_pos(self, db_conn):
        sql = (
            "SELECT stream_id FROM federation_stream_position"
            " WHERE type = ?"
        )
        sql = self.database_engine.convert_param_style(sql)

        txn = db_conn.cursor()
        txn.execute(sql, ("federation",))
        rows = txn.fetchall()
        txn.close()

        return rows[0][0] if rows else -1


class FederationSenderServer(HomeServer):
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            )
        )

        logger.info("Synapse federation_sender now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return FederationSenderReplicationHandler(self)


class FederationSenderReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    def on_rdata(self, stream_name, token, rows):
        super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)

    def get_streams_to_replicate(self):
        args = super(FederationSenderReplicationHandler, self).get_streams_to_replicate()
        args.update(self.send_handler.stream_positions())
        return args


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation sender", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_sender"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.send_federation:
        sys.stderr.write(
            "\nFederation sending must be disabled in the main synapse process"
            "\nbefore it can be run in a separate worker."
            "\nPlease add ``send_federation: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the federation sender to start, since it is disabled in the main config
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-federation-sender", config)


class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """
    def __init__(self, hs, replication_client):
        self.store = hs.get_datastore()
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def stream_positions(self):
        return {"federation": self.federation_position}

    def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            preserve_fn(self.update_token)(token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

    @defer.inlineCallbacks
    def update_token(self, token):
        self.federation_position = token

        # We linearize here to ensure we don't have races updating the token
        with (yield self._fed_position_linearizer.queue(None)):
            if self._last_ack < self.federation_position:
                yield self.store.update_federation_out_pos(
                    "federation", self.federation_position
                )

                # We ACK this token over replication so that the master can drop
                # its in memory queues
                self.replication_client.send_federation_ack(self.federation_position)
                self._last_ack = self.federation_position


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
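_get_federation_out_pos above reads the persisted stream position with a raw cursor, before the usual store machinery is available, and falls back to -1 when nothing has been written yet. A self-contained sketch of the same query pattern against an in-memory SQLite database; the table seed row is invented for the demo:

# Demo of the raw-cursor position lookup; the table contents are made up.
import sqlite3

db_conn = sqlite3.connect(":memory:")
db_conn.execute(
    "CREATE TABLE federation_stream_position (type TEXT, stream_id INTEGER)"
)
db_conn.execute(
    "INSERT INTO federation_stream_position VALUES ('federation', 42)"
)

txn = db_conn.cursor()
txn.execute(
    "SELECT stream_id FROM federation_stream_position WHERE type = ?",
    ("federation",),
)
rows = txn.fetchall()
txn.close()

# Unlike synapse, sqlite already uses qmark parameters, so this sketch
# needs no convert_param_style step.
position = rows[0][0] if rows else -1
assert position == 42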
synapse/app/frontend_proxy.py (new file, 227 lines)
@@ -0,0 +1,227 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import (
    RestServlet, parse_json_object_from_request,
)
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.frontend_proxy")


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if (requester.device_id is not None and
                    device_id != requester.device_id):
                logger.warning("Client uploading keys for a different device "
                               "(logged in as %s, uploading for %s)",
                               requester.device_id, device_id)
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400,
                "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            # Pass through the auth headers, if any, in case the access token
            # is there.
            auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
            headers = {
                "Authorization": auth_headers,
            }
            result = yield self.http_client.post_json_get_json(
                self.main_uri + request.uri,
                body,
                headers=headers,
            )

            defer.returnValue((200, result))
        else:
            # Just interested in counts.
            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
            defer.returnValue((200, {"one_time_key_counts": result}))


class FrontendProxySlavedStore(
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
):
    pass


class FrontendProxyServer(HomeServer):
    def setup(self):
        logger.info("Setting up.")
        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            )
        )

        logger.info("Synapse frontend proxy now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse frontend proxy", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.frontend_proxy"

    assert config.worker_main_http_uri is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-frontend-proxy", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
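KeyUploadServlet.on_POST resolves the device ID in a fixed order: an explicit path parameter (deprecated) wins, otherwise the device bound to the access token is used, and an upload with neither is rejected. A toy distillation of that ordering; the helper below is hypothetical, not synapse code:

# Hypothetical distillation of the device_id resolution in on_POST.
def resolve_device_id(path_device_id, requester_device_id):
    if path_device_id is not None:
        # Deprecated explicit parameter, still honoured for older clients.
        return path_device_id
    if requester_device_id is not None:
        return requester_device_id
    raise ValueError(
        "To upload keys, you must pass device_id when authenticating"
    )


assert resolve_device_id(None, "ABCDEF") == "ABCDEF"
assert resolve_device_id("LEGACY", "ABCDEF") == "LEGACY"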
synapse/app/homeserver.py
@@ -13,59 +13,53 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-import synapse
-
 import gc
 import logging
 import os
 import sys

+import synapse
+import synapse.config.logger
+from synapse import events
+from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
+    LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
+    STATIC_PREFIX, WEB_CLIENT_PREFIX
+from synapse.app import _base
+from synapse.app._base import quit_with_error, listen_ssl, listen_tcp
 from synapse.config._base import ConfigError
-
-from synapse.python_dependencies import (
-    check_requirements, DEPENDENCY_LINKS
-)
-
-from synapse.rest import ClientRestResource
-from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
-from synapse.storage import are_all_users_on_domain
-from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
-
-from synapse.server import HomeServer
-
-from twisted.internet import reactor, task, defer
-from twisted.application import service
-from twisted.web.resource import Resource, EncodingResourceWrapper
-from twisted.web.static import File
-from twisted.web.server import GzipEncoderFactory
-from synapse.http.server import RootRedirect
-from synapse.rest.media.v0.content_repository import ContentRepoResource
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
-from synapse.rest.key.v2 import KeyApiV2Resource
-from synapse.api.urls import (
-    FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
-    SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
-    SERVER_KEY_V2_PREFIX,
-)
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
-from synapse.util.logcontext import LoggingContext
-from synapse.metrics import register_memory_metrics, get_metrics_for
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
 from synapse.federation.transport.server import TransportLayerServer
+from synapse.module_api import ModuleApi
+from synapse.http.additional_resource import AdditionalResource
+from synapse.http.server import RootRedirect
+from synapse.http.site import SynapseSite
+from synapse.metrics import register_memory_metrics
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
+    check_requirements
+from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX
+from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+from synapse.rest import ClientRestResource
+from synapse.rest.key.v1.server_key_resource import LocalKey
+from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.server import HomeServer
+from synapse.storage import are_all_users_on_domain
+from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
+from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
+from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.module_loader import load_module
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.http.site import SynapseSite
-from synapse import events
-
-from daemonize import Daemonize
+from twisted.application import service
+from twisted.internet import defer, reactor
+from twisted.web.resource import EncodingResourceWrapper, NoResource
+from twisted.web.server import GzipEncoderFactory
+from twisted.web.static import File

 logger = logging.getLogger("synapse.app.homeserver")

@@ -90,7 +84,7 @@ def build_resource_for_web_client(hs):
         "\n"
         "You can also disable hosting of the webclient via the\n"
         "configuration option `web_client`\n"
-        % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
+        % {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
     )
     syweb_path = os.path.dirname(syweb.__file__)
     webclient_path = os.path.join(syweb_path, "webclient")

@@ -107,7 +101,7 @@ def build_resource_for_web_client(hs):
 class SynapseHomeServer(HomeServer):
     def _listener_http(self, config, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         tls = listener_config.get("tls", False)
         site_tag = listener_config.get("tag", port)

@@ -117,64 +111,29 @@ class SynapseHomeServer(HomeServer):
         resources = {}
         for res in listener_config["resources"]:
             for name in res["names"]:
-                if name == "client":
-                    client_resource = ClientRestResource(self)
-                    if res["compress"]:
-                        client_resource = gz_wrap(client_resource)
-
-                    resources.update({
-                        "/_matrix/client/api/v1": client_resource,
-                        "/_matrix/client/r0": client_resource,
-                        "/_matrix/client/unstable": client_resource,
-                        "/_matrix/client/v2_alpha": client_resource,
-                        "/_matrix/client/versions": client_resource,
-                    })
-
-                if name == "federation":
-                    resources.update({
-                        FEDERATION_PREFIX: TransportLayerServer(self),
-                    })
-
-                if name in ["static", "client"]:
-                    resources.update({
-                        STATIC_PREFIX: File(
-                            os.path.join(os.path.dirname(synapse.__file__), "static")
-                        ),
-                    })
-
-                if name in ["media", "federation", "client"]:
-                    media_repo = MediaRepositoryResource(self)
-                    resources.update({
-                        MEDIA_PREFIX: media_repo,
-                        LEGACY_MEDIA_PREFIX: media_repo,
-                        CONTENT_REPO_PREFIX: ContentRepoResource(
-                            self, self.config.uploads_path
-                        ),
-                    })
-
-                if name in ["keys", "federation"]:
-                    resources.update({
-                        SERVER_KEY_PREFIX: LocalKey(self),
-                        SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
-                    })
-
-                if name == "webclient":
-                    resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
-
-                if name == "metrics" and self.get_config().enable_metrics:
-                    resources[METRICS_PREFIX] = MetricsResource(self)
-
-                if name == "replication":
-                    resources[REPLICATION_PREFIX] = ReplicationResource(self)
+                resources.update(self._configure_named_resource(
+                    name, res.get("compress", False),
+                ))
+
+        additional_resources = listener_config.get("additional_resources", {})
+        logger.debug("Configuring additional resources: %r",
+                     additional_resources)
+        module_api = ModuleApi(self, self.get_auth_handler())
+        for path, resmodule in additional_resources.items():
+            handler_cls, config = load_module(resmodule)
+            handler = handler_cls(config, module_api)
+            resources[path] = AdditionalResource(self, handler.handle_request)

         if WEB_CLIENT_PREFIX in resources:
             root_resource = RootRedirect(WEB_CLIENT_PREFIX)
         else:
-            root_resource = Resource()
+            root_resource = NoResource()

         root_resource = create_resource_tree(resources, root_resource)

         if tls:
-            reactor.listenSSL(
+            listen_ssl(
+                bind_addresses,
                 port,
                 SynapseSite(
                     "synapse.access.https.%s" % (site_tag,),

@@ -183,21 +142,90 @@ class SynapseHomeServer(HomeServer):
                     root_resource,
                 ),
                 self.tls_server_context_factory,
-                interface=bind_address
             )

         else:
-            reactor.listenTCP(
+            listen_tcp(
+                bind_addresses,
                 port,
                 SynapseSite(
                     "synapse.access.http.%s" % (site_tag,),
                     site_tag,
                     listener_config,
                     root_resource,
-                ),
-                interface=bind_address
+                )
             )
         logger.info("Synapse now listening on port %d", port)

+    def _configure_named_resource(self, name, compress=False):
+        """Build a resource map for a named resource
+
+        Args:
+            name (str): named resource: one of "client", "federation", etc
+            compress (bool): whether to enable gzip compression for this
+                resource
+
+        Returns:
+            dict[str, Resource]: map from path to HTTP resource
+        """
+        resources = {}
+        if name == "client":
+            client_resource = ClientRestResource(self)
+            if compress:
+                client_resource = gz_wrap(client_resource)
+
+            resources.update({
+                "/_matrix/client/api/v1": client_resource,
+                "/_matrix/client/r0": client_resource,
+                "/_matrix/client/unstable": client_resource,
+                "/_matrix/client/v2_alpha": client_resource,
+                "/_matrix/client/versions": client_resource,
+            })
+
+        if name == "federation":
+            resources.update({
+                FEDERATION_PREFIX: TransportLayerServer(self),
+            })
+
+        if name in ["static", "client"]:
+            resources.update({
+                STATIC_PREFIX: File(
+                    os.path.join(os.path.dirname(synapse.__file__), "static")
+                ),
+            })
+
+        if name in ["media", "federation", "client"]:
+            if self.get_config().enable_media_repo:
+                media_repo = self.get_media_repository_resource()
+                resources.update({
+                    MEDIA_PREFIX: media_repo,
+                    LEGACY_MEDIA_PREFIX: media_repo,
+                    CONTENT_REPO_PREFIX: ContentRepoResource(
+                        self, self.config.uploads_path
+                    ),
+                })
+            elif name == "media":
+                raise ConfigError(
+                    "'media' resource conflicts with enable_media_repo=False",
+                )
+
+        if name in ["keys", "federation"]:
+            resources.update({
+                SERVER_KEY_PREFIX: LocalKey(self),
+                SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
+            })
+
+        if name == "webclient":
+            resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
+
+        if name == "metrics" and self.get_config().enable_metrics:
+            resources[METRICS_PREFIX] = MetricsResource(self)
+
+        if name == "replication":
+            resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
+
+        return resources
+
     def start_listening(self):
         config = self.get_config()

@@ -205,15 +233,25 @@ class SynapseHomeServer(HomeServer):
             if listener["type"] == "http":
                 self._listener_http(config, listener)
             elif listener["type"] == "manhole":
-                reactor.listenTCP(
+                listen_tcp(
+                    listener["bind_addresses"],
                     listener["port"],
                     manhole(
                         username="matrix",
                         password="rabbithole",
                         globals={"hs": self},
-                    ),
-                    interface=listener.get("bind_address", '127.0.0.1')
+                    )
                 )
+            elif listener["type"] == "replication":
+                bind_addresses = listener["bind_addresses"]
+                for address in bind_addresses:
+                    factory = ReplicationStreamProtocolFactory(self)
+                    server_listener = reactor.listenTCP(
+                        listener["port"], factory, interface=address
+                    )
+                    reactor.addSystemEventTrigger(
+                        "before", "shutdown", server_listener.stopListening,
+                    )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -233,29 +271,6 @@ class SynapseHomeServer(HomeServer):
         except IncorrectDatabaseSetup as e:
             quit_with_error(e.message)

-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
-
-def quit_with_error(error_string):
-    message_lines = error_string.split("\n")
-    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
-    sys.stderr.write("*" * line_length + '\n')
-    for line in message_lines:
-        sys.stderr.write(" %s\n" % (line.rstrip(),))
-    sys.stderr.write("*" * line_length + '\n')
-    sys.exit(1)
-
-
 def setup(config_options):
     """

@@ -280,7 +295,7 @@ def setup(config_options):
         # generating config files and shouldn't try to continue.
         sys.exit(0)

-    config.setup_logging()
+    synapse.config.logger.setup_logging(config, use_worker_options=False)

     # check any extra requirements we have now we have a config
     check_requirements(config)

@@ -334,7 +349,7 @@ def setup(config_options):
         hs.get_state_handler().start_caching()
         hs.get_datastore().start_profiling()
         hs.get_datastore().start_doing_background_updates()
-        hs.get_replication_layer().start_get_pdu_cache()
+        hs.get_federation_client().start_get_pdu_cache()

         register_memory_metrics(hs)

@@ -383,10 +398,15 @@ def run(hs):
         ThreadPool._worker = profile(ThreadPool._worker)
         reactor.run = profile(reactor.run)

-    start_time = hs.get_clock().time()
+    clock = hs.get_clock()
+    start_time = clock.time()

     stats = {}

+    # Contains the list of processes we will be monitoring
+    # currently either 0 or 1
+    stats_process = []
+
     @defer.inlineCallbacks
     def phone_stats_home():
         logger.info("Gathering stats for reporting")

@@ -395,41 +415,36 @@ def run(hs):
         if uptime < 0:
             uptime = 0

-        # If the stats directory is empty then this is the first time we've
-        # reported stats.
-        first_time = not stats
-
         stats["homeserver"] = hs.config.server_name
         stats["timestamp"] = now
         stats["uptime_seconds"] = uptime
         stats["total_users"] = yield hs.get_datastore().count_all_users()

+        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
+        stats["total_nonbridged_users"] = total_nonbridged_users
+
         room_count = yield hs.get_datastore().get_room_count()
         stats["total_room_count"] = room_count

         stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
-        daily_messages = yield hs.get_datastore().count_daily_messages()
-        if daily_messages is not None:
-            stats["daily_messages"] = daily_messages
-        else:
-            stats.pop("daily_messages", None)
-
-        if first_time:
-            # Add callbacks to report the synapse stats as metrics whenever
-            # prometheus requests them, typically every 30s.
-            # As some of the stats are expensive to calculate we only update
-            # them when synapse phones home to matrix.org every 24 hours.
-            metrics = get_metrics_for("synapse.usage")
-            metrics.add_callback("timestamp", lambda: stats["timestamp"])
-            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
-            metrics.add_callback("total_users", lambda: stats["total_users"])
-            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
-            metrics.add_callback(
-                "daily_active_users", lambda: stats["daily_active_users"]
-            )
-            metrics.add_callback(
-                "daily_messages", lambda: stats.get("daily_messages", 0)
-            )
+        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
+        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
+
+        r30_results = yield hs.get_datastore().count_r30_users()
+        for name, count in r30_results.iteritems():
+            stats["r30_users_" + name] = count
+
+        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+        stats["daily_sent_messages"] = daily_sent_messages
+        stats["cache_factor"] = CACHE_SIZE_FACTOR
+        stats["event_cache_size"] = hs.config.event_cache_size
+
+        if len(stats_process) > 0:
+            stats["memory_rss"] = 0
+            stats["cpu_average"] = 0
+            for process in stats_process:
+                stats["memory_rss"] += process.memory_info().rss
+                stats["cpu_average"] += int(process.cpu_percent(interval=None))

         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:

@@ -440,37 +455,48 @@ def run(hs):
         except Exception as e:
             logger.warn("Error reporting stats: %s", e)

+    def performance_stats_init():
+        try:
+            import psutil
+            process = psutil.Process()
+            # Ensure we can fetch both, and make the initial request for cpu_percent
+            # so the next request will use this as the initial point.
+            process.memory_info().rss
+            process.cpu_percent(interval=None)
+            logger.info("report_stats can use psutil")
+            stats_process.append(process)
+        except (ImportError, AttributeError):
+            logger.warn(
+                "report_stats enabled but psutil is not installed or incorrect version."
+                " Disabling reporting of memory/cpu stats."
+                " Ensuring psutil is available will help matrix.org track performance"
+                " changes across releases."
+            )
+
     if hs.config.report_stats:
-        phone_home_task = task.LoopingCall(phone_stats_home)
-        logger.info("Scheduling stats reporting for 24 hour intervals")
-        phone_home_task.start(60 * 60 * 24, now=False)
-
-    def in_thread():
-        # Uncomment to enable tracing of log context changes.
-        # sys.settrace(logcontext_tracer)
-        with LoggingContext("run"):
-            change_resource_limit(hs.config.soft_file_limit)
-            if hs.config.gc_thresholds:
-                gc.set_threshold(*hs.config.gc_thresholds)
-            reactor.run()
-
-    if hs.config.daemonize:
-
-        if hs.config.print_pidfile:
-            print (hs.config.pid_file)
-
-        daemon = Daemonize(
-            app="synapse-homeserver",
-            pid=hs.config.pid_file,
-            action=lambda: in_thread(),
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-
-        daemon.start()
-    else:
-        in_thread()
+        logger.info("Scheduling stats reporting for 3 hour intervals")
+        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
+
+        # We need to defer this init for the cases that we daemonize
+        # otherwise the process ID we get is that of the non-daemon process
+        clock.call_later(0, performance_stats_init)
+
+        # We wait 5 minutes to send the first set of stats as the server can
+        # be quite busy the first few minutes
+        clock.call_later(5 * 60, phone_stats_home)
+
+    if hs.config.daemonize and hs.config.print_pidfile:
+        print (hs.config.pid_file)
+
+    _base.start_reactor(
+        "synapse-homeserver",
+        hs.config.soft_file_limit,
+        hs.config.gc_thresholds,
+        hs.config.pid_file,
+        hs.config.daemonize,
+        hs.config.cpu_affinity,
+        logger,
+    )


 def main():
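The performance_stats_init added above makes an initial cpu_percent(interval=None) call on purpose: psutil reports CPU usage since the previous call, so the first reading only becomes meaningful after priming. A standalone sketch of the same probe; psutil is an optional dependency, and the except branch mirrors the diff's fallback of simply not reporting:

# Sketch of the psutil probe from run(); requires the optional psutil package.
try:
    import psutil

    process = psutil.Process()
    process.memory_info().rss           # check memory stats are readable
    process.cpu_percent(interval=None)  # prime the CPU usage baseline
    print("memory_rss:", process.memory_info().rss)
    print("cpu_average:", int(process.cpu_percent(interval=None)))
except (ImportError, AttributeError):
    print("psutil unavailable; memory/cpu stats will not be reported")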
synapse/app/media_repository.py
@@ -13,43 +13,37 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
 from synapse.api.urls import (
     CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
 )
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.crypto import context_factory
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.transactions import TransactionStore
+from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.media.v0.content_repository import ContentRepoResource
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
 from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
 from synapse.storage.media_repository import MediaRepositoryStore
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc
+from twisted.internet import reactor
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.media_repository")

@@ -57,27 +51,15 @@ logger = logging.getLogger("synapse.app.media_repository")
 class MediaRepositorySlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
+    SlavedClientIpStore,
+    TransactionStore,
     BaseSlavedStore,
     MediaRepositoryStore,
-    ClientIpStore,
 ):
     pass

|
|
||||||
class MediaRepositoryServer(HomeServer):
|
class MediaRepositoryServer(HomeServer):
|
||||||
def get_db_conn(self, run_new_connection=True):
|
|
||||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
|
||||||
# not be passed to the database engine.
|
|
||||||
db_params = {
|
|
||||||
k: v for k, v in self.db_config.get("args", {}).items()
|
|
||||||
if not k.startswith("cp_")
|
|
||||||
}
|
|
||||||
db_conn = self.database_engine.module.connect(**db_params)
|
|
||||||
|
|
||||||
if run_new_connection:
|
|
||||||
self.database_engine.on_new_connection(db_conn)
|
|
||||||
return db_conn
|
|
||||||
|
|
||||||
def setup(self):
|
def setup(self):
|
||||||
logger.info("Setting up.")
|
logger.info("Setting up.")
|
||||||
self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
|
self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
|
||||||
@@ -85,7 +67,7 @@ class MediaRepositoryServer(HomeServer):
|
|||||||
|
|
||||||
def _listen_http(self, listener_config):
|
def _listen_http(self, listener_config):
|
||||||
port = listener_config["port"]
|
port = listener_config["port"]
|
||||||
bind_address = listener_config.get("bind_address", "")
|
bind_addresses = listener_config["bind_addresses"]
|
||||||
site_tag = listener_config.get("tag", port)
|
site_tag = listener_config.get("tag", port)
|
||||||
resources = {}
|
resources = {}
|
||||||
for res in listener_config["resources"]:
|
for res in listener_config["resources"]:
|
||||||
@@ -93,7 +75,7 @@ class MediaRepositoryServer(HomeServer):
|
|||||||
if name == "metrics":
|
if name == "metrics":
|
||||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||||
elif name == "media":
|
elif name == "media":
|
||||||
media_repo = MediaRepositoryResource(self)
|
media_repo = self.get_media_repository_resource()
|
||||||
resources.update({
|
resources.update({
|
||||||
MEDIA_PREFIX: media_repo,
|
MEDIA_PREFIX: media_repo,
|
||||||
LEGACY_MEDIA_PREFIX: media_repo,
|
LEGACY_MEDIA_PREFIX: media_repo,
|
||||||
@@ -102,17 +84,19 @@ class MediaRepositoryServer(HomeServer):
|
|||||||
),
|
),
|
||||||
})
|
})
|
||||||
|
|
||||||
root_resource = create_resource_tree(resources, Resource())
|
root_resource = create_resource_tree(resources, NoResource())
|
||||||
reactor.listenTCP(
|
|
||||||
|
_base.listen_tcp(
|
||||||
|
bind_addresses,
|
||||||
port,
|
port,
|
||||||
SynapseSite(
|
SynapseSite(
|
||||||
"synapse.access.http.%s" % (site_tag,),
|
"synapse.access.http.%s" % (site_tag,),
|
||||||
site_tag,
|
site_tag,
|
||||||
listener_config,
|
listener_config,
|
||||||
root_resource,
|
root_resource,
|
||||||
),
|
)
|
||||||
interface=bind_address
|
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.info("Synapse media repository now listening on port %d", port)
|
logger.info("Synapse media repository now listening on port %d", port)
|
||||||
|
|
||||||
def start_listening(self, listeners):
|
def start_listening(self, listeners):
|
||||||
@@ -120,33 +104,22 @@ class MediaRepositoryServer(HomeServer):
|
|||||||
if listener["type"] == "http":
|
if listener["type"] == "http":
|
||||||
self._listen_http(listener)
|
self._listen_http(listener)
|
||||||
elif listener["type"] == "manhole":
|
elif listener["type"] == "manhole":
|
||||||
reactor.listenTCP(
|
_base.listen_tcp(
|
||||||
|
listener["bind_addresses"],
|
||||||
listener["port"],
|
listener["port"],
|
||||||
manhole(
|
manhole(
|
||||||
username="matrix",
|
username="matrix",
|
||||||
password="rabbithole",
|
password="rabbithole",
|
||||||
globals={"hs": self},
|
globals={"hs": self},
|
||||||
),
|
)
|
||||||
interface=listener.get("bind_address", '127.0.0.1')
|
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
self.get_tcp_replication().start_replication(self)
|
||||||
def replicate(self):
|
|
||||||
http_client = self.get_simple_http_client()
|
|
||||||
store = self.get_datastore()
|
|
||||||
replication_url = self.config.worker_replication_url
|
|
||||||
|
|
||||||
while True:
|
def build_tcp_replication(self):
|
||||||
try:
|
return ReplicationClientHandler(self.get_datastore())
|
||||||
args = store.stream_positions()
|
|
||||||
args["timeout"] = 30000
|
|
||||||
result = yield http_client.get_json(replication_url, args=args)
|
|
||||||
yield store.process_replication(result)
|
|
||||||
except:
|
|
||||||
logger.exception("Error replicating from %r", replication_url)
|
|
||||||
yield sleep(5)
|
|
||||||
|
|
||||||
|
|
||||||
def start(config_options):
|
def start(config_options):
|
||||||
@@ -160,7 +133,16 @@ def start(config_options):
|
|||||||
|
|
||||||
assert config.worker_app == "synapse.app.media_repository"
|
assert config.worker_app == "synapse.app.media_repository"
|
||||||
|
|
||||||
setup_logging(config.worker_log_config, config.worker_log_file)
|
if config.enable_media_repo:
|
||||||
|
_base.quit_with_error(
|
||||||
|
"enable_media_repo must be disabled in the main synapse process\n"
|
||||||
|
"before the media repo can be run in a separate worker.\n"
|
||||||
|
"Please add ``enable_media_repo: false`` to the main config\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
setup_logging(config, use_worker_options=True)
|
||||||
|
|
||||||
|
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||||
|
|
||||||
database_engine = create_engine(config.database_config)
|
database_engine = create_engine(config.database_config)
|
||||||
|
|
||||||
@@ -176,35 +158,15 @@ def start(config_options):
|
|||||||
)
|
)
|
||||||
|
|
||||||
ss.setup()
|
ss.setup()
|
||||||
ss.get_handlers()
|
|
||||||
ss.start_listening(config.worker_listeners)
|
ss.start_listening(config.worker_listeners)
|
||||||
|
|
||||||
def run():
|
|
||||||
with LoggingContext("run"):
|
|
||||||
logger.info("Running")
|
|
||||||
change_resource_limit(config.soft_file_limit)
|
|
||||||
if config.gc_thresholds:
|
|
||||||
gc.set_threshold(*config.gc_thresholds)
|
|
||||||
reactor.run()
|
|
||||||
|
|
||||||
def start():
|
def start():
|
||||||
|
ss.get_state_handler().start_caching()
|
||||||
ss.get_datastore().start_profiling()
|
ss.get_datastore().start_profiling()
|
||||||
ss.replicate()
|
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
reactor.callWhenRunning(start)
|
||||||
|
|
||||||
if config.worker_daemonize:
|
_base.start_worker_reactor("synapse-media-repository", config)
|
||||||
daemon = Daemonize(
|
|
||||||
app="synapse-media-repository",
|
|
||||||
pid=config.worker_pid_file,
|
|
||||||
action=run,
|
|
||||||
auto_close_fds=False,
|
|
||||||
verbose=True,
|
|
||||||
logger=logger,
|
|
||||||
)
|
|
||||||
daemon.start()
|
|
||||||
else:
|
|
||||||
run()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
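A recurring edit in these worker diffs is `reactor.listenTCP(..., interface=bind_address)` becoming `_base.listen_tcp(bind_addresses, port, factory)`, i.e. one listener entry may now bind several addresses. A plausible shape for that helper is simply a loop over `reactor.listenTCP` — a sketch under that assumption; the real `synapse.app._base.listen_tcp` may handle errors and defaults differently:

from twisted.internet import reactor


def listen_tcp(bind_addresses, port, factory, backlog=50):
    # Bind the same protocol factory to every configured address.
    # Hypothetical reimplementation of the _base.listen_tcp used above.
    for address in bind_addresses:
        reactor.listenTCP(port, factory, backlog, interface=address)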
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -13,37 +13,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys

 import synapse
+from synapse import events
-from synapse.server import HomeServer
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
-from synapse.storage.roommember import RoomMemberStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.pushers import SlavedPusherStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.storage.engines import create_engine
+from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.util.async import sleep
+from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc

 logger = logging.getLogger("synapse.app.pusher")

@@ -80,46 +74,19 @@ class PusherSlaveStore(
         DataStore.get_profile_displayname.__func__
     )

-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
-    )
-

 class PusherServer(HomeServer):

-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = PusherSlaveStore(self.get_db_conn(), self)
         logger.info("Finished setting up.")

     def remove_pusher(self, app_id, push_key, user_id):
-        http_client = self.get_simple_http_client()
-        replication_url = self.config.worker_replication_url
-        url = replication_url + "/remove_pushers"
-        return http_client.post_json_get_json(url, {
-            "remove": [{
-                "app_id": app_id,
-                "push_key": push_key,
-                "user_id": user_id,
-            }]
-        })
+        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -127,17 +94,19 @@ class PusherServer(HomeServer):
             if name == "metrics":
                 resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
-            ),
-            interface=bind_address
+            )
         )

         logger.info("Synapse pusher now listening on port %d", port)

     def start_listening(self, listeners):
@@ -145,85 +114,64 @@ class PusherServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
-                ),
-                interface=listener.get("bind_address", '127.0.0.1')
+                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])

+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return PusherReplicationHandler(self)
+
+
+class PusherReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(PusherReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.pusher_pool = hs.get_pusherpool()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
+        preserve_fn(self.poke_pushers)(stream_name, token, rows)
+
     @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        pusher_pool = self.get_pusherpool()
-
-        def stop_pusher(user_id, app_id, pushkey):
-            key = "%s:%s" % (app_id, pushkey)
-            pushers_for_user = pusher_pool.pushers.get(user_id, {})
-            pusher = pushers_for_user.pop(key, None)
-            if pusher is None:
-                return
-            logger.info("Stopping pusher %r / %r", user_id, key)
-            pusher.on_stop()
-
-        def start_pusher(user_id, app_id, pushkey):
-            key = "%s:%s" % (app_id, pushkey)
-            logger.info("Starting pusher %r / %r", user_id, key)
-            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)
-
-        @defer.inlineCallbacks
-        def poke_pushers(results):
-            pushers_rows = set(
-                map(tuple, results.get("pushers", {}).get("rows", []))
-            )
-            deleted_pushers_rows = set(
-                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
-            )
-            for row in sorted(pushers_rows | deleted_pushers_rows):
-                if row in deleted_pushers_rows:
-                    user_id, app_id, pushkey = row[1:4]
-                    stop_pusher(user_id, app_id, pushkey)
-                elif row in pushers_rows:
-                    user_id = row[1]
-                    app_id = row[5]
-                    pushkey = row[8]
-                    yield start_pusher(user_id, app_id, pushkey)
-
-            stream = results.get("events")
-            if stream:
-                min_stream_id = stream["rows"][0][0]
-                max_stream_id = stream["position"]
-                preserve_fn(pusher_pool.on_new_notifications)(
-                    min_stream_id, max_stream_id
-                )
-
-            stream = results.get("receipts")
-            if stream:
-                rows = stream["rows"]
-                affected_room_ids = set(row[1] for row in rows)
-                min_stream_id = rows[0][0]
-                max_stream_id = stream["position"]
-                preserve_fn(pusher_pool.on_new_receipts)(
-                    min_stream_id, max_stream_id, affected_room_ids
-                )
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                poke_pushers(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(30)
+    def poke_pushers(self, stream_name, token, rows):
+        if stream_name == "pushers":
+            for row in rows:
+                if row.deleted:
+                    yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
+                else:
+                    yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
+        elif stream_name == "events":
+            yield self.pusher_pool.on_new_notifications(
+                token, token,
+            )
+        elif stream_name == "receipts":
+            yield self.pusher_pool.on_new_receipts(
+                token, token, set(row.room_id for row in rows)
+            )
+
+    def stop_pusher(self, user_id, app_id, pushkey):
+        key = "%s:%s" % (app_id, pushkey)
+        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
+        pusher = pushers_for_user.pop(key, None)
+        if pusher is None:
+            return
+        logger.info("Stopping pusher %r / %r", user_id, key)
+        pusher.on_stop()
+
+    def start_pusher(self, user_id, app_id, pushkey):
+        key = "%s:%s" % (app_id, pushkey)
+        logger.info("Starting pusher %r / %r", user_id, key)
+        return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)


 def start(config_options):
@@ -237,7 +185,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.pusher"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)

+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
     if config.start_pushers:
         sys.stderr.write(
@@ -264,33 +214,14 @@ def start(config_options):
     ps.setup()
     ps.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
-        ps.replicate()
         ps.get_pusherpool().start()
         ps.get_datastore().start_profiling()
+        ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-pusher",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-pusher", config)


 if __name__ == '__main__':
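The pusher now consumes replication over TCP: `build_tcp_replication` hands back a `ReplicationClientHandler` subclass, and every batch of rows arrives through `on_rdata(stream_name, token, rows)`, which dispatches on the stream name. The idiom in miniature — a runnable toy with a made-up row type, not the Synapse classes:

import collections

# Hypothetical row shape; real streams define their own namedtuples.
PusherRow = collections.namedtuple("PusherRow", ["deleted", "user_id", "app_id", "pushkey"])


class ToyReplicationHandler(object):
    """Toy stand-in for the on_rdata dispatch used by PusherReplicationHandler."""

    def on_rdata(self, stream_name, token, rows):
        # Rows are stream-specific, so dispatch on the stream name first.
        if stream_name == "pushers":
            for row in rows:
                verb = "stop" if row.deleted else "start"
                print("%s pusher %s/%s for %s" % (verb, row.app_id, row.pushkey, row.user_id))
        elif stream_name == "events":
            # token is the new stream position.
            print("wake pushers up to position %d" % (token,))


handler = ToyReplicationHandler()
handler.on_rdata("pushers", 5, [PusherRow(False, "@user:example.com", "m.http", "abc")])
handler.on_rdata("events", 6, [])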
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -13,93 +13,85 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import contextlib
+import logging
+import sys

 import synapse
+from synapse.api.constants import EventTypes
-from synapse.api.constants import EventTypes, PresenceState
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.events import FrozenEvent
+from synapse.handlers.presence import PresenceHandler, get_interested_parties
-from synapse.handlers.presence import PresenceHandler
-from synapse.http.site import SynapseSite
 from synapse.http.server import JsonResource
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.http.site import SynapseSite
-from synapse.rest.client.v2_alpha import sync
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
-from synapse.rest.client.v1 import events
 from synapse.replication.slave.storage._base import BaseSlavedStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
-from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1 import events
+from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
+from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
+from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
-from synapse.storage.presence import PresenceStore, UserPresenceState
+from synapse.storage.presence import UserPresenceState
 from synapse.storage.roommember import RoomMemberStore
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor, defer
-from twisted.web.resource import Resource
+from twisted.internet import defer, reactor
+from twisted.web.resource import NoResource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import contextlib
-import gc
-import ujson as json

 logger = logging.getLogger("synapse.app.synchrotron")


 class SynchrotronSlavedStore(
-    SlavedPushRuleStore,
-    SlavedEventStore,
     SlavedReceiptsStore,
     SlavedAccountDataStore,
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedFilteringStore,
     SlavedPresenceStore,
+    SlavedGroupServerStore,
+    SlavedDeviceInboxStore,
+    SlavedDeviceStore,
+    SlavedPushRuleStore,
+    SlavedEventStore,
+    SlavedClientIpStore,
+    RoomStore,
     BaseSlavedStore,
-    ClientIpStore, # After BaseSlavedStore because the constructor is different
 ):
-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
+    did_forget = (
+        RoomMemberStore.__dict__["did_forget"]
     )

-    # XXX: This is a bit broken because we don't persist the accepted list in a
-    # way that can be replicated. This means that we don't have a way to
-    # invalidate the cache correctly.
-    get_presence_list_accepted = PresenceStore.__dict__[
-        "get_presence_list_accepted"
-    ]
-    get_presence_list_observers_accepted = PresenceStore.__dict__[
-        "get_presence_list_observers_accepted"
-    ]
-

 UPDATE_SYNCING_USERS_MS = 10 * 1000


 class SynchrotronPresence(object):
     def __init__(self, hs):
+        self.hs = hs
         self.is_mine_id = hs.is_mine_id
         self.http_client = hs.get_simple_http_client()
         self.store = hs.get_datastore()
         self.user_to_num_current_syncs = {}
-        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()

@@ -109,17 +101,52 @@ class SynchrotronPresence(object):
             for state in active_presence
         }

+        # user_id -> last_sync_ms. Lists the users that have stopped syncing
+        # but we haven't notified the master of that yet
+        self.users_going_offline = {}
+
+        self._send_stop_syncing_loop = self.clock.looping_call(
+            self.send_stop_syncing, 10 * 1000
+        )
+
         self.process_id = random_string(16)
         logger.info("Presence process_id is %r", self.process_id)

-        self._sending_sync = False
-        self._need_to_send_sync = False
-        self.clock.looping_call(
-            self._send_syncing_users_regularly,
-            UPDATE_SYNCING_USERS_MS,
-        )
-
-        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)
+
+    def mark_as_coming_online(self, user_id):
+        """A user has started syncing. Send a UserSync to the master, unless they
+        had recently stopped syncing.
+
+        Args:
+            user_id (str)
+        """
+        going_offline = self.users_going_offline.pop(user_id, None)
+        if not going_offline:
+            # Safe to skip because we haven't yet told the master they were offline
+            self.send_user_sync(user_id, True, self.clock.time_msec())
+
+    def mark_as_going_offline(self, user_id):
+        """A user has stopped syncing. We wait before notifying the master as
+        its likely they'll come back soon. This allows us to avoid sending
+        a stopped syncing immediately followed by a started syncing notification
+        to the master
+
+        Args:
+            user_id (str)
+        """
+        self.users_going_offline[user_id] = self.clock.time_msec()
+
+    def send_stop_syncing(self):
+        """Check if there are any users who have stopped syncing a while ago
+        and haven't come back yet. If there are poke the master about them.
+        """
+        now = self.clock.time_msec()
+        for user_id, last_sync_ms in self.users_going_offline.items():
+            if now - last_sync_ms > 10 * 1000:
+                self.users_going_offline.pop(user_id, None)
+                self.send_user_sync(user_id, False, last_sync_ms)

     def set_state(self, user, state, ignore_status_msg=False):
         # TODO Hows this supposed to work?
@@ -127,18 +154,16 @@ class SynchrotronPresence(object):

     get_states = PresenceHandler.get_states.__func__
     get_state = PresenceHandler.get_state.__func__
-    _get_interested_parties = PresenceHandler._get_interested_parties.__func__
     current_state_for_users = PresenceHandler.current_state_for_users.__func__

-    @defer.inlineCallbacks
     def user_syncing(self, user_id, affect_presence):
         if affect_presence:
             curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
             self.user_to_num_current_syncs[user_id] = curr_sync + 1
-            prev_states = yield self.current_state_for_users([user_id])
-            if prev_states[user_id].state == PresenceState.OFFLINE:
-                # TODO: Don't block the sync request on this HTTP hit.
-                yield self._send_syncing_users_now()
+
+            # If we went from no in flight sync to some, notify replication
+            if self.user_to_num_current_syncs[user_id] == 1:
+                self.mark_as_coming_online(user_id)

         def _end():
             # We check that the user_id is in user_to_num_current_syncs because
@@ -147,6 +172,10 @@ class SynchrotronPresence(object):
             if affect_presence and user_id in self.user_to_num_current_syncs:
                 self.user_to_num_current_syncs[user_id] -= 1

+                # If we went from one in flight sync to non, notify replication
+                if self.user_to_num_current_syncs[user_id] == 0:
+                    self.mark_as_going_offline(user_id)
+
         @contextlib.contextmanager
         def _user_syncing():
             try:
@@ -154,56 +183,12 @@ class SynchrotronPresence(object):
             finally:
                 _end()

-        defer.returnValue(_user_syncing())
-
-    @defer.inlineCallbacks
-    def _on_shutdown(self):
-        # When the synchrotron is shutdown tell the master to clear the in
-        # progress syncs for this process
-        self.user_to_num_current_syncs.clear()
-        yield self._send_syncing_users_now()
-
-    def _send_syncing_users_regularly(self):
-        # Only send an update if we aren't in the middle of sending one.
-        if not self._sending_sync:
-            preserve_fn(self._send_syncing_users_now)()
-
-    @defer.inlineCallbacks
-    def _send_syncing_users_now(self):
-        if self._sending_sync:
-            # We don't want to race with sending another update.
-            # Instead we wait for that update to finish and send another
-            # update afterwards.
-            self._need_to_send_sync = True
-            return
-
-        # Flag that we are sending an update.
-        self._sending_sync = True
-
-        yield self.http_client.post_json_get_json(self.syncing_users_url, {
-            "process_id": self.process_id,
-            "syncing_users": [
-                user_id for user_id, count in self.user_to_num_current_syncs.items()
-                if count > 0
-            ],
-        })
-
-        # Unset the flag as we are no longer sending an update.
-        self._sending_sync = False
-        if self._need_to_send_sync:
-            # If something happened while we were sending the update then
-            # we might need to send another update.
-            # TODO: Check if the update that was sent matches the current state
-            # as we only need to send an update if they are different.
-            self._need_to_send_sync = False
-            yield self._send_syncing_users_now()
+        return defer.succeed(_user_syncing())

     @defer.inlineCallbacks
     def notify_from_replication(self, states, stream_id):
-        parties = yield self._get_interested_parties(
-            states, calculate_remote_hosts=False
-        )
-        room_ids_to_states, users_to_states, _ = parties
+        parties = yield get_interested_parties(self.store, states)
+        room_ids_to_states, users_to_states = parties

         self.notifier.on_new_event(
             "presence_key", stream_id, rooms=room_ids_to_states.keys(),
@@ -211,26 +196,24 @@ class SynchrotronPresence(object):
         )

     @defer.inlineCallbacks
-    def process_replication(self, result):
-        stream = result.get("presence", {"rows": []})
-        states = []
-        for row in stream["rows"]:
-            (
-                position, user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            ) = row
-            state = UserPresenceState(
-                user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            )
-            self.user_to_current_state[user_id] = state
-            states.append(state)
+    def process_replication_rows(self, token, rows):
+        states = [UserPresenceState(
+            row.user_id, row.state, row.last_active_ts,
+            row.last_federation_update_ts, row.last_user_sync_ts, row.status_msg,
+            row.currently_active
+        ) for row in rows]

-        if states and "position" in stream:
-            stream_id = int(stream["position"])
-            yield self.notify_from_replication(states, stream_id)
+        for state in states:
+            self.user_to_current_state[row.user_id] = state
+
+        stream_id = token
+        yield self.notify_from_replication(states, stream_id)
+
+    def get_currently_syncing_users(self):
+        return [
+            user_id for user_id, count in self.user_to_num_current_syncs.iteritems()
+            if count > 0
+        ]


 class SynchrotronTyping(object):
@@ -240,18 +223,17 @@ class SynchrotronTyping(object):
         self._room_typing = {}

     def stream_positions(self):
+        # We must update this typing token from the response of the previous
+        # sync. In particular, the stream id may "reset" back to zero/a low
+        # value which we *must* use for the next replication request.
         return {"typing": self._latest_room_serial}

-    def process_replication(self, result):
-        stream = result.get("typing")
-        if stream:
-            self._latest_room_serial = int(stream["position"])
+    def process_replication_rows(self, token, rows):
+        self._latest_room_serial = token

-            for row in stream["rows"]:
-                position, room_id, typing_json = row
-                typing = json.loads(typing_json)
-                self._room_serials[room_id] = position
-                self._room_typing[room_id] = typing
+        for row in rows:
+            self._room_serials[row.room_id] = token
+            self._room_typing[row.room_id] = row.user_ids


 class SynchrotronApplicationService(object):
@@ -260,19 +242,6 @@ class SynchrotronApplicationService(object):


 class SynchrotronServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
@@ -280,7 +249,7 @@ class SynchrotronServer(HomeServer):

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:
@@ -291,6 +260,8 @@ class SynchrotronServer(HomeServer):
                 resource = JsonResource(self, canonical_json=False)
                 sync.register_servlets(self, resource)
                 events.register_servlets(self, resource)
+                InitialSyncRestServlet(self).register(resource)
+                RoomInitialSyncRestServlet(self).register(resource)
                 resources.update({
                     "/_matrix/client/r0": resource,
                     "/_matrix/client/unstable": resource,
@@ -298,17 +269,19 @@ class SynchrotronServer(HomeServer):
                     "/_matrix/client/api/v1": resource,
                 })

-        root_resource = create_resource_tree(resources, Resource())
-        reactor.listenTCP(
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
-            ),
-            interface=bind_address
+            )
         )

         logger.info("Synapse synchrotron now listening on port %d", port)

     def start_listening(self, listeners):
@@ -316,101 +289,22 @@ class SynchrotronServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
-                ),
-                interface=listener.get("bind_address", '127.0.0.1')
+                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        notifier = self.get_notifier()
-        presence_handler = self.get_presence_handler()
-        typing_handler = self.get_typing_handler()
-
-        def notify_from_stream(
-            result, stream_name, stream_key, room=None, user=None
-        ):
-            stream = result.get(stream_name)
-            if stream:
-                position_index = stream["field_names"].index("position")
-                if room:
-                    room_index = stream["field_names"].index(room)
-                if user:
-                    user_index = stream["field_names"].index(user)
-
-                users = ()
-                rooms = ()
-                for row in stream["rows"]:
-                    position = row[position_index]
-
-                    if user:
-                        users = (row[user_index],)
-
-                    if room:
-                        rooms = (row[room_index],)
-
-                    notifier.on_new_event(
-                        stream_key, position, users=users, rooms=rooms
-                    )
-
-        def notify(result):
-            stream = result.get("events")
-            if stream:
-                max_position = stream["position"]
-                for row in stream["rows"]:
-                    position = row[0]
-                    internal = json.loads(row[1])
-                    event_json = json.loads(row[2])
-                    event = FrozenEvent(event_json, internal_metadata_dict=internal)
-                    extra_users = ()
-                    if event.type == EventTypes.Member:
-                        extra_users = (event.state_key,)
-                    notifier.on_new_room_event(
-                        event, position, max_position, extra_users
-                    )
-
-            notify_from_stream(
-                result, "push_rules", "push_rules_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "user_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "room_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "tag_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "receipts", "receipt_key", room="room_id"
-            )
-            notify_from_stream(
-                result, "typing", "typing_key", room="room_id"
-            )
-
-        while True:
-            try:
-                args = store.stream_positions()
-                args.update(typing_handler.stream_positions())
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                yield store.process_replication(result)
-                typing_handler.process_replication(result)
-                yield presence_handler.process_replication(result)
-                notify(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return SyncReplicationHandler(self)

     def build_presence_handler(self):
         return SynchrotronPresence(self)
@@ -419,6 +313,82 @@ class SynchrotronServer(HomeServer):
         return SynchrotronTyping(self)


+class SyncReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(SyncReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.store = hs.get_datastore()
+        self.typing_handler = hs.get_typing_handler()
+        # NB this is a SynchrotronPresence, not a normal PresenceHandler
+        self.presence_handler = hs.get_presence_handler()
+        self.notifier = hs.get_notifier()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
+
+        preserve_fn(self.process_and_notify)(stream_name, token, rows)
+
+    def get_streams_to_replicate(self):
+        args = super(SyncReplicationHandler, self).get_streams_to_replicate()
+        args.update(self.typing_handler.stream_positions())
+        return args
+
+    def get_currently_syncing_users(self):
+        return self.presence_handler.get_currently_syncing_users()
+
+    @defer.inlineCallbacks
+    def process_and_notify(self, stream_name, token, rows):
+        if stream_name == "events":
+            # We shouldn't get multiple rows per token for events stream, so
+            # we don't need to optimise this for multiple rows.
+            for row in rows:
+                event = yield self.store.get_event(row.event_id)
+                extra_users = ()
+                if event.type == EventTypes.Member:
+                    extra_users = (event.state_key,)
+                max_token = self.store.get_room_max_stream_ordering()
+                self.notifier.on_new_room_event(
+                    event, token, max_token, extra_users
+                )
+        elif stream_name == "push_rules":
+            self.notifier.on_new_event(
+                "push_rules_key", token, users=[row.user_id for row in rows],
+            )
+        elif stream_name in ("account_data", "tag_account_data",):
+            self.notifier.on_new_event(
+                "account_data_key", token, users=[row.user_id for row in rows],
+            )
+        elif stream_name == "receipts":
+            self.notifier.on_new_event(
+                "receipt_key", token, rooms=[row.room_id for row in rows],
+            )
+        elif stream_name == "typing":
+            self.typing_handler.process_replication_rows(token, rows)
+            self.notifier.on_new_event(
+                "typing_key", token, rooms=[row.room_id for row in rows],
+            )
+        elif stream_name == "to_device":
+            entities = [row.entity for row in rows if row.entity.startswith("@")]
+            if entities:
+                self.notifier.on_new_event(
+                    "to_device_key", token, users=entities,
+                )
+        elif stream_name == "device_lists":
+            all_room_ids = set()
+            for row in rows:
+                room_ids = yield self.store.get_rooms_for_user(row.user_id)
+                all_room_ids.update(room_ids)
+            self.notifier.on_new_event(
+                "device_list_key", token, rooms=all_room_ids,
+            )
+        elif stream_name == "presence":
+            yield self.presence_handler.process_replication_rows(token, rows)
+        elif stream_name == "receipts":
+            self.notifier.on_new_event(
+                "groups_key", token, users=[row.user_id for row in rows],
+            )
+
+
 def start(config_options):
     try:
         config = HomeServerConfig.load_config(
@@ -430,7 +400,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.synchrotron"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

     database_engine = create_engine(config.database_config)

@@ -446,32 +418,13 @@ def start(config_options):
     ss.setup()
     ss.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_datastore().start_profiling()
-        ss.replicate()
+        ss.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-synchrotron",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-synchrotron", config)


 if __name__ == '__main__':
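Within the synchrotron diff, the `users_going_offline` map replaces the old bulk `/syncing_users` POSTs with a debounce: a user's last-sync time is recorded, and a periodic sweep only tells the master about users who have stayed quiet for 10 seconds, so a quick reconnect never produces a stop/start pair. The mechanism in isolation, with the standard `time` module standing in for Synapse's clock:

import time


class GoingOfflineDebouncer(object):
    """Isolated sketch of the users_going_offline logic in the diff above."""

    QUIET_MS = 10 * 1000

    def __init__(self, send_user_sync):
        # send_user_sync(user_id, is_syncing, last_sync_ms) notifies the master.
        self.send_user_sync = send_user_sync
        self.users_going_offline = {}

    def _now_ms(self):
        return int(time.time() * 1000)

    def mark_as_coming_online(self, user_id):
        going_offline = self.users_going_offline.pop(user_id, None)
        if not going_offline:
            # The master was never told they went offline, so tell it
            # they are syncing again.
            self.send_user_sync(user_id, True, self._now_ms())

    def mark_as_going_offline(self, user_id):
        # Defer the notification; the user may reconnect within QUIET_MS.
        self.users_going_offline[user_id] = self._now_ms()

    def send_stop_syncing(self):
        # Called on a timer: flush users who stayed quiet long enough.
        now = self._now_ms()
        for user_id, last_sync_ms in list(self.users_going_offline.items()):
            if now - last_sync_ms > self.QUIET_MS:
                self.users_going_offline.pop(user_id, None)
                self.send_user_sync(user_id, False, last_sync_ms)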
--- a/synctl
+++ b/synctl
@@ -23,14 +23,27 @@ import signal
 import subprocess
 import sys
 import yaml
+import errno
+import time

-SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
+SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]

 GREEN = "\x1b[1;32m"
+YELLOW = "\x1b[1;33m"
 RED = "\x1b[1;31m"
 NORMAL = "\x1b[m"


+def pid_running(pid):
+    try:
+        os.kill(pid, 0)
+        return True
+    except OSError as err:
+        if err.errno == errno.EPERM:
+            return True
+        return False
+
+
 def write(message, colour=NORMAL, stream=sys.stdout):
     if colour == NORMAL:
         stream.write(message + "\n")
@@ -38,6 +51,11 @@ def write(message, colour=NORMAL, stream=sys.stdout):
         stream.write(colour + message + NORMAL + "\n")


+def abort(message, colour=RED, stream=sys.stderr):
+    write(message, colour, stream)
+    sys.exit(1)
+
+
 def start(configfile):
     write("Starting ...")
     args = SYNAPSE
@@ -45,7 +63,8 @@ def start(configfile):

     try:
         subprocess.check_call(args)
-        write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
+        write("started synapse.app.homeserver(%r)" %
+              (configfile,), colour=GREEN)
     except subprocess.CalledProcessError as e:
         write(
             "error starting (exit code: %d); see above for logs" % e.returncode,
@@ -76,8 +95,16 @@ def start_worker(app, configfile, worker_configfile):
 def stop(pidfile, app):
     if os.path.exists(pidfile):
         pid = int(open(pidfile).read())
-        os.kill(pid, signal.SIGTERM)
-        write("stopped %s" % (app,), colour=GREEN)
+        try:
+            os.kill(pid, signal.SIGTERM)
+            write("stopped %s" % (app,), colour=GREEN)
+        except OSError as err:
+            if err.errno == errno.ESRCH:
+                write("%s not running" % (app,), colour=YELLOW)
+            elif err.errno == errno.EPERM:
+                abort("Cannot stop %s: Operation not permitted" % (app,))
+            else:
+                abort("Cannot stop %s: Unknown error" % (app,))


 Worker = collections.namedtuple("Worker", [
@@ -98,7 +125,7 @@ def main():
|
|||||||
"configfile",
|
"configfile",
|
||||||
nargs="?",
|
nargs="?",
|
||||||
default="homeserver.yaml",
|
default="homeserver.yaml",
|
||||||
help="the homeserver config file, defaults to homserver.yaml",
|
help="the homeserver config file, defaults to homeserver.yaml",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"-w", "--worker",
|
"-w", "--worker",
|
||||||
@@ -157,6 +184,9 @@ def main():
|
|||||||
worker_configfiles.append(worker_configfile)
|
worker_configfiles.append(worker_configfile)
|
||||||
|
|
||||||
if options.all_processes:
|
if options.all_processes:
|
||||||
|
# To start the main synapse with -a you need to add a worker file
|
||||||
|
# with worker_app == "synapse.app.homeserver"
|
||||||
|
start_stop_synapse = False
|
||||||
worker_configdir = options.all_processes
|
worker_configdir = options.all_processes
|
||||||
if not os.path.isdir(worker_configdir):
|
if not os.path.isdir(worker_configdir):
|
||||||
write(
|
write(
|
||||||
@@ -173,10 +203,29 @@ def main():
|
|||||||
with open(worker_configfile) as stream:
|
with open(worker_configfile) as stream:
|
||||||
worker_config = yaml.load(stream)
|
worker_config = yaml.load(stream)
|
||||||
worker_app = worker_config["worker_app"]
|
worker_app = worker_config["worker_app"]
|
||||||
worker_pidfile = worker_config["worker_pid_file"]
|
if worker_app == "synapse.app.homeserver":
|
||||||
worker_daemonize = worker_config["worker_daemonize"]
|
# We need to special case all of this to pick up options that may
|
||||||
assert worker_daemonize # TODO print something more user friendly
|
# be set in the main config file or in this worker config file.
|
||||||
worker_cache_factor = worker_config.get("synctl_cache_factor")
|
worker_pidfile = (
|
||||||
|
worker_config.get("pid_file")
|
||||||
|
or pidfile
|
||||||
|
)
|
||||||
|
worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
|
||||||
|
daemonize = worker_config.get("daemonize") or config.get("daemonize")
|
||||||
|
assert daemonize, "Main process must have daemonize set to true"
|
||||||
|
|
||||||
|
# The master process doesn't support using worker_* config.
|
||||||
|
for key in worker_config:
|
||||||
|
if key == "worker_app": # But we allow worker_app
|
||||||
|
continue
|
||||||
|
assert not key.startswith("worker_"), \
|
||||||
|
"Main process cannot use worker_* config"
|
||||||
|
else:
|
||||||
|
worker_pidfile = worker_config["worker_pid_file"]
|
||||||
|
worker_daemonize = worker_config["worker_daemonize"]
|
||||||
|
assert worker_daemonize, "In config %r: expected '%s' to be True" % (
|
||||||
|
worker_configfile, "worker_daemonize")
|
||||||
|
worker_cache_factor = worker_config.get("synctl_cache_factor")
|
||||||
workers.append(Worker(
|
workers.append(Worker(
|
||||||
worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
|
worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
|
||||||
))
|
))
|
||||||
@@ -190,10 +239,26 @@ def main():
|
|||||||
if start_stop_synapse:
|
if start_stop_synapse:
|
||||||
stop(pidfile, "synapse.app.homeserver")
|
stop(pidfile, "synapse.app.homeserver")
|
||||||
|
|
||||||
# TODO: Wait for synapse to actually shutdown before starting it again
|
# Wait for synapse to actually shutdown before starting it again
|
||||||
|
if action == "restart":
|
||||||
|
running_pids = []
|
||||||
|
if start_stop_synapse and os.path.exists(pidfile):
|
||||||
|
running_pids.append(int(open(pidfile).read()))
|
||||||
|
for worker in workers:
|
||||||
|
if os.path.exists(worker.pidfile):
|
||||||
|
running_pids.append(int(open(worker.pidfile).read()))
|
||||||
|
if len(running_pids) > 0:
|
||||||
|
write("Waiting for process to exit before restarting...")
|
||||||
|
for running_pid in running_pids:
|
||||||
|
while pid_running(running_pid):
|
||||||
|
time.sleep(0.2)
|
||||||
|
write("All processes exited; now restarting...")
|
||||||
|
|
||||||
if action == "start" or action == "restart":
|
if action == "start" or action == "restart":
|
||||||
if start_stop_synapse:
|
if start_stop_synapse:
|
||||||
|
# Check if synapse is already running
|
||||||
|
if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
|
||||||
|
abort("synapse.app.homeserver already running")
|
||||||
start(configfile)
|
start(configfile)
|
||||||
|
|
||||||
for worker in workers:
|
for worker in workers:
|
||||||
|
|||||||
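pid_running relies on the standard kill(pid, 0) probe: signal 0 delivers nothing but still performs the existence and permission checks, and EPERM proves the process exists even when we cannot signal it. A self-contained demonstration:

import errno
import os

def pid_running(pid):
    try:
        os.kill(pid, 0)  # signal 0: existence/permission check only
        return True
    except OSError as err:
        # EPERM means the process exists but belongs to another user
        return err.errno == errno.EPERM

if __name__ == "__main__":
    print(pid_running(os.getpid()))   # True: we clearly exist
    print(pid_running(2 ** 22 + 1))   # almost certainly False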
synapse/app/user_dir.py (new file, 224 lines)
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sys
+
+import synapse
+from synapse import events
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.crypto import context_factory
+from synapse.http.server import JsonResource
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v2_alpha import user_directory
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.storage.user_directory import UserDirectoryStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor
+from twisted.web.resource import NoResource
+
+logger = logging.getLogger("synapse.app.user_dir")
+
+
+class UserDirectorySlaveStore(
+    SlavedEventStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    SlavedClientIpStore,
+    UserDirectoryStore,
+    BaseSlavedStore,
+):
+    def __init__(self, db_conn, hs):
+        super(UserDirectorySlaveStore, self).__init__(db_conn, hs)
+
+        events_max = self._stream_id_gen.get_current_token()
+        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
+            db_conn, "current_state_delta_stream",
+            entity_column="room_id",
+            stream_column="stream_id",
+            max_value=events_max,  # As we share the stream id with events token
+            limit=1000,
+        )
+        self._curr_state_delta_stream_cache = StreamChangeCache(
+            "_curr_state_delta_stream_cache", min_curr_state_delta_id,
+            prefilled_cache=curr_state_delta_prefill,
+        )
+
+        self._current_state_delta_pos = events_max
+
+    def stream_positions(self):
+        result = super(UserDirectorySlaveStore, self).stream_positions()
+        result["current_state_deltas"] = self._current_state_delta_pos
+        return result
+
+    def process_replication_rows(self, stream_name, token, rows):
+        if stream_name == "current_state_deltas":
+            self._current_state_delta_pos = token
+            for row in rows:
+                self._curr_state_delta_stream_cache.entity_has_changed(
+                    row.room_id, token
+                )
+        return super(UserDirectorySlaveStore, self).process_replication_rows(
+            stream_name, token, rows
+        )
+
+
+class UserDirectoryServer(HomeServer):
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    user_directory.register_servlets(self, resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, NoResource())
+
+        _base.listen_tcp(
+            bind_addresses,
+            port,
+            SynapseSite(
+                "synapse.access.http.%s" % (site_tag,),
+                site_tag,
+                listener_config,
+                root_resource,
+            )
+        )
+
+        logger.info("Synapse user_dir now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                _base.listen_tcp(
+                    listener["bind_addresses"],
+                    listener["port"],
+                    manhole(
+                        username="matrix",
+                        password="rabbithole",
+                        globals={"hs": self},
+                    )
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return UserDirectoryReplicationHandler(self)
+
+
+class UserDirectoryReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
+        self.user_directory = hs.get_user_directory_handler()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(UserDirectoryReplicationHandler, self).on_rdata(
+            stream_name, token, rows
+        )
+        if stream_name == "current_state_deltas":
+            preserve_fn(self.user_directory.notify_new_event)()
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse user directory", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.user_dir"
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    if config.update_user_directory:
+        sys.stderr.write(
+            "\nThe update_user_directory must be disabled in the main synapse process"
+            "\nbefore they can be run in a separate worker."
+            "\nPlease add ``update_user_directory: false`` to the main config"
+            "\n"
+        )
+        sys.exit(1)
+
+    # Force the pushers to start since they will be disabled in the main config
+    config.update_user_directory = True
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ps = UserDirectoryServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ps.setup()
+    ps.start_listening(config.worker_listeners)
+
+    def start():
+        ps.get_datastore().start_profiling()
+        ps.get_state_handler().start_caching()
+
+    reactor.callWhenRunning(start)
+
+    _base.start_worker_reactor("synapse-user-dir", config)
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
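The slave store above follows the usual replication-consumer contract: report a position per stream on (re)connect, apply each row to local state, then advance the token. A stripped-down illustration of that contract (not Synapse's real classes):

# Minimal sketch of the position-tracking contract; illustrative only.
class StreamConsumer(object):
    def __init__(self, initial_positions):
        self.positions = dict(initial_positions)  # stream name -> token

    def stream_positions(self):
        # Sent to the server on (re)connect so it can replay missed rows.
        return dict(self.positions)

    def on_rdata(self, stream_name, token, rows):
        for row in rows:
            pass  # apply the row to local caches/state here
        self.positions[stream_name] = token  # only advance after applying

consumer = StreamConsumer({"current_state_deltas": 0})
consumer.on_rdata("current_state_deltas", 42, [("room", "!a:example.org")])
assert consumer.stream_positions()["current_state_deltas"] == 42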
synapse/appservice/__init__.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.api.constants import EventTypes
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.types import GroupID, get_domain_from_id
+
 from twisted.internet import defer

@@ -80,19 +82,27 @@ class ApplicationService(object):
     # values.
     NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

-    def __init__(self, token, url=None, namespaces=None, hs_token=None,
-                 sender=None, id=None, protocols=None):
+    def __init__(self, token, hostname, url=None, namespaces=None, hs_token=None,
+                 sender=None, id=None, protocols=None, rate_limited=True):
         self.token = token
         self.url = url
         self.hs_token = hs_token
         self.sender = sender
+        self.server_name = hostname
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id

+        if "|" in self.id:
+            raise Exception("application service ID cannot contain '|' character")
+
+        # .protocols is a publicly visible field
         if protocols:
             self.protocols = set(protocols)
         else:
             self.protocols = set()

+        self.rate_limited = rate_limited
+
     def _check_namespaces(self, namespaces):
         # Sanity check that it is of the form:
         # {
@@ -117,29 +127,41 @@ class ApplicationService(object):
                     raise ValueError(
                         "Expected bool for 'exclusive' in ns '%s'" % ns
                     )
-                if not isinstance(regex_obj.get("regex"), basestring):
+                group_id = regex_obj.get("group_id")
+                if group_id:
+                    if not isinstance(group_id, str):
+                        raise ValueError(
+                            "Expected string for 'group_id' in ns '%s'" % ns
+                        )
+                    try:
+                        GroupID.from_string(group_id)
+                    except Exception:
+                        raise ValueError(
+                            "Expected valid group ID for 'group_id' in ns '%s'" % ns
+                        )
+
+                    if get_domain_from_id(group_id) != self.server_name:
+                        raise ValueError(
+                            "Expected 'group_id' to be this host in ns '%s'" % ns
+                        )
+
+                regex = regex_obj.get("regex")
+                if isinstance(regex, basestring):
+                    regex_obj["regex"] = re.compile(regex)  # Pre-compile regex
+                else:
                     raise ValueError(
                         "Expected string for 'regex' in ns '%s'" % ns
                     )
         return namespaces

-    def _matches_regex(self, test_string, namespace_key, return_obj=False):
-        if not isinstance(test_string, basestring):
-            logger.error(
-                "Expected a string to test regex against, but got %s",
-                test_string
-            )
-            return False
-
+    def _matches_regex(self, test_string, namespace_key):
         for regex_obj in self.namespaces[namespace_key]:
-            if re.match(regex_obj["regex"], test_string):
-                if return_obj:
-                    return regex_obj
-                return True
-        return False
+            if regex_obj["regex"].match(test_string):
+                return regex_obj
+        return None

     def _is_exclusive(self, ns_key, test_string):
-        regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
+        regex_obj = self._matches_regex(test_string, ns_key)
         if regex_obj:
             return regex_obj["exclusive"]
         return False
@@ -159,7 +181,14 @@ class ApplicationService(object):
         if not store:
             defer.returnValue(False)

-        member_list = yield store.get_users_in_room(event.room_id)
+        does_match = yield self._matches_user_in_member_list(event.room_id, store)
+        defer.returnValue(does_match)
+
+    @cachedInlineCallbacks(num_args=1, cache_context=True)
+    def _matches_user_in_member_list(self, room_id, store, cache_context):
+        member_list = yield store.get_users_in_room(
+            room_id, on_invalidate=cache_context.invalidate
+        )

         # check joined member events
         for user_id in member_list:
@@ -212,10 +241,10 @@ class ApplicationService(object):
         )

     def is_interested_in_alias(self, alias):
-        return self._matches_regex(alias, ApplicationService.NS_ALIASES)
+        return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES))

     def is_interested_in_room(self, room_id):
-        return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
+        return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS))

     def is_exclusive_user(self, user_id):
         return (
@@ -232,5 +261,33 @@ class ApplicationService(object):
     def is_exclusive_room(self, room_id):
         return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

+    def get_exlusive_user_regexes(self):
+        """Get the list of regexes used to determine if a user is exclusively
+        registered by the AS
+        """
+        return [
+            regex_obj["regex"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if regex_obj["exclusive"]
+        ]
+
+    def get_groups_for_user(self, user_id):
+        """Get the groups that this user is associated with by this AS
+
+        Args:
+            user_id (str): The ID of the user.
+
+        Returns:
+            iterable[str]: an iterable that yields group_id strings.
+        """
+        return (
+            regex_obj["group_id"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if "group_id" in regex_obj and regex_obj["regex"].match(user_id)
+        )
+
+    def is_rate_limited(self):
+        return self.rate_limited
+
     def __str__(self):
         return "ApplicationService: %s" % (self.__dict__,)
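_check_namespaces now pre-compiles each namespace regex once, and _matches_regex returns the matching namespace entry (or None) rather than a bool, so callers like _is_exclusive can read fields such as exclusive off the match without a second scan. A stripped-down sketch of the pattern with stand-in data:

import re

# Stand-in namespace data; the real config entries carry more fields.
namespaces = [
    {"regex": re.compile(r"@irc_.*:example\.org"), "exclusive": True},
    {"regex": re.compile(r"@gitter_.*:example\.org"), "exclusive": False},
]

def matches(test_string):
    # Return the whole entry so callers can inspect its other fields.
    for entry in namespaces:
        if entry["regex"].match(test_string):
            return entry
    return None

entry = matches("@irc_alice:example.org")
print(bool(entry))                             # interest check: True
print(entry["exclusive"] if entry else False)  # exclusivity check: True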
synapse/appservice/api.py
@@ -14,10 +14,13 @@
 # limitations under the License.
 from twisted.internet import defer

+from synapse.api.constants import ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException
 from synapse.http.client import SimpleHttpClient
 from synapse.events.utils import serialize_event
-from synapse.types import ThirdPartyEntityKind
+from synapse.util.logcontext import preserve_fn, make_deferred_yieldable
+from synapse.util.caches.response_cache import ResponseCache
+from synapse.types import ThirdPartyInstanceID

 import logging
 import urllib
@@ -25,6 +28,20 @@ import urllib
 logger = logging.getLogger(__name__)


+HOUR_IN_MS = 60 * 60 * 1000
+
+
+APP_SERVICE_PREFIX = "/_matrix/app/unstable"
+
+
+def _is_valid_3pe_metadata(info):
+    if "instances" not in info:
+        return False
+    if not isinstance(info["instances"], list):
+        return False
+    return True
+
+
 def _is_valid_3pe_result(r, field):
     if not isinstance(r, dict):
         return False
@@ -56,8 +73,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         super(ApplicationServiceApi, self).__init__(hs)
         self.clock = hs.get_clock()

+        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
+
     @defer.inlineCallbacks
     def query_user(self, service, user_id):
+        if service.url is None:
+            defer.returnValue(False)
         uri = service.url + ("/users/%s" % urllib.quote(user_id))
         response = None
         try:
@@ -77,6 +98,8 @@ class ApplicationServiceApi(SimpleHttpClient):

     @defer.inlineCallbacks
     def query_alias(self, service, alias):
+        if service.url is None:
+            defer.returnValue(False)
         uri = service.url + ("/rooms/%s" % urllib.quote(alias))
         response = None
         try:
@@ -97,16 +120,22 @@ class ApplicationServiceApi(SimpleHttpClient):
     @defer.inlineCallbacks
     def query_3pe(self, service, kind, protocol, fields):
         if kind == ThirdPartyEntityKind.USER:
-            uri = "%s/3pu/%s" % (service.url, urllib.quote(protocol))
             required_field = "userid"
         elif kind == ThirdPartyEntityKind.LOCATION:
-            uri = "%s/3pl/%s" % (service.url, urllib.quote(protocol))
             required_field = "alias"
         else:
             raise ValueError(
                 "Unrecognised 'kind' argument %r to query_3pe()", kind
             )
+        if service.url is None:
+            defer.returnValue([])
+
+        uri = "%s%s/thirdparty/%s/%s" % (
+            service.url,
+            APP_SERVICE_PREFIX,
+            kind,
+            urllib.quote(protocol)
+        )
         try:
             response = yield self.get_json(uri, fields)
             if not isinstance(response, list):
@@ -131,8 +160,51 @@ class ApplicationServiceApi(SimpleHttpClient):
             logger.warning("query_3pe to %s threw exception %s", uri, ex)
             defer.returnValue([])

+    def get_3pe_protocol(self, service, protocol):
+        if service.url is None:
+            defer.returnValue({})
+
+        @defer.inlineCallbacks
+        def _get():
+            uri = "%s%s/thirdparty/protocol/%s" % (
+                service.url,
+                APP_SERVICE_PREFIX,
+                urllib.quote(protocol)
+            )
+            try:
+                info = yield self.get_json(uri, {})
+
+                if not _is_valid_3pe_metadata(info):
+                    logger.warning("query_3pe_protocol to %s did not return a"
+                                   " valid result", uri)
+                    defer.returnValue(None)
+
+                for instance in info.get("instances", []):
+                    network_id = instance.get("network_id", None)
+                    if network_id is not None:
+                        instance["instance_id"] = ThirdPartyInstanceID(
+                            service.id, network_id,
+                        ).to_string()
+
+                defer.returnValue(info)
+            except Exception as ex:
+                logger.warning("query_3pe_protocol to %s threw exception %s",
+                               uri, ex)
+                defer.returnValue(None)
+
+        key = (service.id, protocol)
+        result = self.protocol_meta_cache.get(key)
+        if not result:
+            result = self.protocol_meta_cache.set(
+                key, preserve_fn(_get)()
+            )
+        return make_deferred_yieldable(result)
+
     @defer.inlineCallbacks
     def push_bulk(self, service, events, txn_id=None):
+        if service.url is None:
+            defer.returnValue(True)
+
         events = self._serialize(events)

         if txn_id is None:
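get_3pe_protocol caches protocol metadata per (service id, protocol) key for an hour. Synapse's ResponseCache also shares a single in-flight Deferred between concurrent callers; this small synchronous sketch only illustrates the keying and expiry side of the idea:

import time

# Sketch of the caching idea behind protocol_meta_cache; illustrative only.
class TTLCache(object):
    def __init__(self, timeout_ms):
        self.timeout = timeout_ms / 1000.0
        self.entries = {}  # key -> (expiry_time, value)

    def get(self, key):
        entry = self.entries.get(key)
        if entry and entry[0] > time.time():
            return entry[1]
        return None

    def set(self, key, value):
        self.entries[key] = (time.time() + self.timeout, value)
        return value

cache = TTLCache(timeout_ms=60 * 60 * 1000)
key = ("appservice-id", "irc")  # one entry per (service, protocol)
if cache.get(key) is None:
    cache.set(key, {"instances": []})  # stand-in for the fetched metadata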
synapse/appservice/scheduler.py
@@ -123,7 +123,7 @@ class _ServiceQueuer(object):
             with Measure(self.clock, "servicequeuer.send"):
                 try:
                     yield self.txn_ctrl.send(service, events)
-                except:
+                except Exception:
                     logger.exception("AS request failed")
                 finally:
                     self.requests_in_flight.discard(service.id)
@@ -150,12 +150,12 @@ class _TransactionController(object):
             if service_is_up:
                 sent = yield txn.send(self.as_api)
                 if sent:
-                    txn.complete(self.store)
+                    yield txn.complete(self.store)
                 else:
-                    self._start_recoverer(service)
+                    preserve_fn(self._start_recoverer)(service)
         except Exception as e:
             logger.exception(e)
-            self._start_recoverer(service)
+            preserve_fn(self._start_recoverer)(service)

     @defer.inlineCallbacks
     def on_recovered(self, recoverer):
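The except: to except Exception: change matters because a bare except also swallows SystemExit and KeyboardInterrupt, which derive from BaseException and should normally propagate and terminate the process. A short demonstration:

# Why `except Exception:` is safer than a bare `except:` here.
def swallow_everything():
    try:
        raise SystemExit(1)
    except:  # noqa: E722 - deliberately bare for the demonstration
        return "swallowed"

def swallow_errors_only():
    try:
        raise SystemExit(1)
    except Exception:
        return "swallowed"

print(swallow_everything())  # "swallowed" - the exit never happens
try:
    swallow_errors_only()
except SystemExit:
    print("SystemExit propagated as intended")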
synapse/config/_base.py
@@ -64,11 +64,12 @@ class Config(object):
         if isinstance(value, int) or isinstance(value, long):
             return value
         second = 1000
-        hour = 60 * 60 * second
+        minute = 60 * second
+        hour = 60 * minute
         day = 24 * hour
         week = 7 * day
         year = 365 * day
-        sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
+        sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
         size = 1
         suffix = value[-1]
         if suffix in sizes:
@@ -80,22 +81,38 @@ class Config(object):
     def abspath(file_path):
         return os.path.abspath(file_path) if file_path else file_path

+    @classmethod
+    def path_exists(cls, file_path):
+        """Check if a file exists
+
+        Unlike os.path.exists, this throws an exception if there is an error
+        checking if the file exists (for example, if there is a perms error on
+        the parent dir).
+
+        Returns:
+            bool: True if the file exists; False if not.
+        """
+        try:
+            os.stat(file_path)
+            return True
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise e
+            return False
+
     @classmethod
     def check_file(cls, file_path, config_name):
         if file_path is None:
             raise ConfigError(
                 "Missing config for %s."
-                " You must specify a path for the config file. You can "
-                "do this with the -c or --config-path option. "
-                "Adding --generate-config along with --server-name "
-                "<server name> will generate a config file at the given path."
                 % (config_name,)
             )
-        if not os.path.exists(file_path):
+        try:
+            os.stat(file_path)
+        except OSError as e:
             raise ConfigError(
-                "File %s config for %s doesn't exist."
-                " Try running again with --generate-config"
-                % (file_path, config_name,)
+                "Error accessing file '%s' (config for %s): %s"
+                % (file_path, config_name, e.strerror)
             )
         return cls.abspath(file_path)

@@ -247,7 +264,7 @@ class Config(object):
                     " -c CONFIG-FILE\""
                 )
             (config_path,) = config_files
-            if not os.path.exists(config_path):
+            if not cls.path_exists(config_path):
                 if config_args.keys_directory:
                     config_dir_path = config_args.keys_directory
                 else:
@@ -260,7 +277,7 @@ class Config(object):
                         "Must specify a server_name to a generate config for."
                         " Pass -H server.name."
                     )
-                if not os.path.exists(config_dir_path):
+                if not cls.path_exists(config_dir_path):
                     os.makedirs(config_dir_path)
                 with open(config_path, "wb") as config_file:
                     config_bytes, config = obj.generate_config(
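With the new m suffix the duration table covers seconds through years. A standalone rendering of the parser (the body of the `if suffix in sizes:` branch falls outside the hunk, so it is completed here the obvious way):

# Standalone version of the suffix table above (values are milliseconds).
def parse_duration(value):
    if isinstance(value, int):
        return value
    second = 1000
    minute = 60 * second
    hour = 60 * minute
    day = 24 * hour
    week = 7 * day
    year = 365 * day
    sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
    size = 1
    suffix = value[-1]
    if suffix in sizes:
        value = value[:-1]
        size = sizes[suffix]
    return int(value) * size

assert parse_duration("10m") == 600000
assert parse_duration("1d") == 86400000
assert parse_duration(500) == 500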
synapse/config/appservice.py
@@ -86,7 +86,7 @@ def load_appservices(hostname, config_files):

 def _load_appservice(hostname, as_info, config_filename):
     required_string_fields = [
-        "id", "url", "as_token", "hs_token", "sender_localpart"
+        "id", "as_token", "hs_token", "sender_localpart"
     ]
     for field in required_string_fields:
         if not isinstance(as_info.get(field), basestring):
@@ -94,6 +94,14 @@ def _load_appservice(hostname, as_info, config_filename):
                 field, config_filename,
             ))

+    # 'url' must either be a string or explicitly null, not missing
+    # to avoid accidentally turning off push for ASes.
+    if (not isinstance(as_info.get("url"), basestring) and
+            as_info.get("url", "") is not None):
+        raise KeyError(
+            "Required string field or explicit null: 'url' (%s)" % (config_filename,)
+        )
+
     localpart = as_info["sender_localpart"]
     if urllib.quote(localpart) != localpart:
         raise ValueError(
@@ -102,6 +110,11 @@ def _load_appservice(hostname, as_info, config_filename):
     user = UserID(localpart, hostname)
     user_id = user.to_string()

+    # Rate limiting for users of this AS is on by default (excludes sender)
+    rate_limited = True
+    if isinstance(as_info.get("rate_limited"), bool):
+        rate_limited = as_info.get("rate_limited")
+
     # namespace checks
     if not isinstance(as_info.get("namespaces"), dict):
         raise KeyError("Requires 'namespaces' object.")
@@ -132,12 +145,21 @@ def _load_appservice(hostname, as_info, config_filename):
         for p in protocols:
             if not isinstance(p, str):
                 raise KeyError("Bad value for 'protocols' item")
+
+    if as_info["url"] is None:
+        logger.info(
+            "(%s) Explicitly empty 'url' provided. This application service"
+            " will not receive events or queries.",
+            config_filename,
+        )
     return ApplicationService(
         token=as_info["as_token"],
+        hostname=hostname,
         url=as_info["url"],
         namespaces=as_info["namespaces"],
         hs_token=as_info["hs_token"],
         sender=user_id,
         id=as_info["id"],
         protocols=protocols,
+        rate_limited=rate_limited
     )
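The 'url' check distinguishes three cases: a string (normal operation), an explicit null (the AS is loaded but receives no events or queries), and a missing key or wrong type (rejected). A condensed rendering, using Python 3's str where the original uses basestring:

def check_url(as_info):
    # get() without a default is None when the key is missing, so a missing
    # key fails the isinstance test; get("url", "") is only None when the
    # config author wrote an explicit null.
    if (not isinstance(as_info.get("url"), str) and
            as_info.get("url", "") is not None):
        raise KeyError("Required string field or explicit null: 'url'")
    return as_info.get("url")

check_url({"url": "http://localhost:9000"})  # ok: push enabled
check_url({"url": None})                     # ok: push explicitly disabled
try:
    check_url({})                            # missing key: rejected
except KeyError as e:
    print(e)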
synapse/config/cas.py
@@ -41,7 +41,7 @@ class CasConfig(Config):
         #cas_config:
         #   enabled: true
         #   server_url: "https://cas-server.com"
-        #   service_url: "https://homesever.domain.com:8448"
+        #   service_url: "https://homeserver.domain.com:8448"
         #   #required_attributes:
         #   #    name: value
         """
synapse/config/emailconfig.py
@@ -68,6 +68,18 @@ class EmailConfig(Config):
             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
             )
+            self.email_riot_base_url = email_config.get(
+                "riot_base_url", None
+            )
+            self.email_smtp_user = email_config.get(
+                "smtp_user", None
+            )
+            self.email_smtp_pass = email_config.get(
+                "smtp_pass", None
+            )
+            self.require_transport_security = email_config.get(
+                "require_transport_security", False
+            )
             if "app_name" in email_config:
                 self.email_app_name = email_config["app_name"]
             else:
@@ -85,14 +97,25 @@ class EmailConfig(Config):
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable sending emails for notification events
+        # Defining a custom URL for Riot is only needed if email notifications
+        # should contain links to a self-hosted installation of Riot; when set
+        # the "app_name" setting is ignored.
+        #
+        # If your SMTP server requires authentication, the optional smtp_user &
+        # smtp_pass variables should be used
+        #
         #email:
         #   enable_notifs: false
         #   smtp_host: "localhost"
         #   smtp_port: 25
+        #   smtp_user: "exampleusername"
+        #   smtp_pass: "examplepassword"
+        #   require_transport_security: False
         #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
         #   app_name: Matrix
         #   template_dir: res/templates
         #   notif_template_html: notif_mail.html
         #   notif_template_text: notif_mail.txt
         #   notif_for_new_users: True
+        #   riot_base_url: "http://localhost/riot"
         """
synapse/config/groups.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class GroupsConfig(Config):
+    def read_config(self, config):
+        self.enable_group_creation = config.get("enable_group_creation", False)
+        self.group_creation_prefix = config.get("group_creation_prefix", "")
+
+    def default_config(self, **kwargs):
+        return """\
+        # Whether to allow non server admins to create groups on this server
+        enable_group_creation: false
+
+        # If enabled, non server admins can only create groups with local parts
+        # starting with this prefix
+        # group_creation_prefix: "unofficial/"
+        """
synapse/config/homeserver.py
@@ -30,17 +30,22 @@ from .saml2 import SAML2Config
 from .cas import CasConfig
 from .password import PasswordConfig
 from .jwt import JWTConfig
-from .ldap import LDAPConfig
+from .password_auth_providers import PasswordAuthProviderConfig
 from .emailconfig import EmailConfig
 from .workers import WorkerConfig
+from .push import PushConfig
+from .spam_checker import SpamCheckerConfig
+from .groups import GroupsConfig
+from .user_directory import UserDirectoryConfig


 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,
-                       WorkerConfig,):
+                       JWTConfig, PasswordConfig, EmailConfig,
+                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
+                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,):
     pass
synapse/config/key.py
@@ -118,10 +118,9 @@ class KeyConfig(Config):
         signing_keys = self.read_file(signing_key_path, "signing_key")
         try:
             return read_signing_keys(signing_keys.splitlines(True))
-        except Exception:
+        except Exception as e:
             raise ConfigError(
-                "Error reading signing_key."
-                " Try running again with --generate-config"
+                "Error reading signing_key: %s" % (str(e))
             )

     def read_old_signing_keys(self, old_signing_keys):
@@ -141,7 +140,8 @@ class KeyConfig(Config):

     def generate_files(self, config):
         signing_key_path = config["signing_key_path"]
-        if not os.path.exists(signing_key_path):
+
+        if not self.path_exists(signing_key_path):
             with open(signing_key_path, "w") as signing_key_file:
                 key_id = "a_" + random_string(4)
                 write_signing_keys(
synapse/config/ldap.py (file deleted)
@@ -1,100 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015 Niklas Riekenbrauck
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ._base import Config, ConfigError
-
-
-MISSING_LDAP3 = (
-    "Missing ldap3 library. This is required for LDAP Authentication."
-)
-
-
-class LDAPMode(object):
-    SIMPLE = "simple",
-    SEARCH = "search",
-
-    LIST = (SIMPLE, SEARCH)
-
-
-class LDAPConfig(Config):
-    def read_config(self, config):
-        ldap_config = config.get("ldap_config", {})
-
-        self.ldap_enabled = ldap_config.get("enabled", False)
-
-        if self.ldap_enabled:
-            # verify dependencies are available
-            try:
-                import ldap3
-                ldap3  # to stop unused lint
-            except ImportError:
-                raise ConfigError(MISSING_LDAP3)
-
-            self.ldap_mode = LDAPMode.SIMPLE
-
-            # verify config sanity
-            self.require_keys(ldap_config, [
-                "uri",
-                "base",
-                "attributes",
-            ])
-
-            self.ldap_uri = ldap_config["uri"]
-            self.ldap_start_tls = ldap_config.get("start_tls", False)
-            self.ldap_base = ldap_config["base"]
-            self.ldap_attributes = ldap_config["attributes"]
-
-            if "bind_dn" in ldap_config:
-                self.ldap_mode = LDAPMode.SEARCH
-                self.require_keys(ldap_config, [
-                    "bind_dn",
-                    "bind_password",
-                ])
-
-                self.ldap_bind_dn = ldap_config["bind_dn"]
-                self.ldap_bind_password = ldap_config["bind_password"]
-                self.ldap_filter = ldap_config.get("filter", None)
-
-            # verify attribute lookup
-            self.require_keys(ldap_config['attributes'], [
-                "uid",
-                "name",
-                "mail",
-            ])
-
-    def require_keys(self, config, required):
-        missing = [key for key in required if key not in config]
-        if missing:
-            raise ConfigError(
-                "LDAP enabled but missing required config values: {}".format(
-                    ", ".join(missing)
-                )
-            )
-
-    def default_config(self, **kwargs):
-        return """\
-        # ldap_config:
-        #   enabled: true
-        #   uri: "ldap://ldap.example.com:389"
-        #   start_tls: true
-        #   base: "ou=users,dc=example,dc=com"
-        #   attributes:
-        #      uid: "cn"
-        #      mail: "email"
-        #      name: "givenName"
-        #   #bind_dn:
-        #   #bind_password:
-        #   #filter: "(objectClass=posixAccount)"
-        """
synapse/config/logger.py
@@ -15,47 +15,48 @@

 from ._base import Config
 from synapse.util.logcontext import LoggingContextFilter
-from twisted.python.log import PythonLoggingObserver
+from twisted.logger import globalLogBeginner, STDLibLogObserver
 import logging
 import logging.config
 import yaml
 from string import Template
 import os
 import signal
-from synapse.util.debug import debug_deferreds


 DEFAULT_LOG_CONFIG = Template("""
 version: 1

 formatters:
   precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s\
-- %(message)s'
+   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
+%(request)s - %(message)s'

 filters:
   context:
     (): synapse.util.logcontext.LoggingContextFilter
     request: ""

 handlers:
   file:
     class: logging.handlers.RotatingFileHandler
     formatter: precise
     filename: ${log_file}
     maxBytes: 104857600
     backupCount: 10
     filters: [context]
-    level: INFO
   console:
     class: logging.StreamHandler
     formatter: precise
+    filters: [context]

 loggers:
     synapse:
         level: INFO

     synapse.storage.SQL:
+        # beware: increasing this to DEBUG will make synapse log sensitive
+        # information such as access tokens.
         level: INFO

 root:
@@ -68,35 +69,24 @@ class LoggingConfig(Config):

     def read_config(self, config):
         self.verbosity = config.get("verbose", 0)
+        self.no_redirect_stdio = config.get("no_redirect_stdio", False)
         self.log_config = self.abspath(config.get("log_config"))
         self.log_file = self.abspath(config.get("log_file"))
-        if config.get("full_twisted_stacktraces"):
-            debug_deferreds()

     def default_config(self, config_dir_path, server_name, **kwargs):
-        log_file = self.abspath("homeserver.log")
         log_config = self.abspath(
             os.path.join(config_dir_path, server_name + ".log.config")
         )
         return """
-        # Logging verbosity level.
-        verbose: 0
-
-        # File to write logging to
-        log_file: "%(log_file)s"
-
         # A yaml python logging config file
         log_config: "%(log_config)s"
-
-        # Stop twisted from discarding the stack traces of exceptions in
-        # deferreds by waiting a reactor tick before running a deferred's
-        # callbacks.
-        # full_twisted_stacktraces: true
         """ % locals()

     def read_arguments(self, args):
         if args.verbose is not None:
             self.verbosity = args.verbose
+        if args.no_redirect_stdio is not None:
+            self.no_redirect_stdio = args.no_redirect_stdio
         if args.log_config is not None:
             self.log_config = args.log_config
         if args.log_file is not None:
@@ -106,48 +96,68 @@ class LoggingConfig(Config):
         logging_group = parser.add_argument_group("logging")
         logging_group.add_argument(
             '-v', '--verbose', dest="verbose", action='count',
-            help="The verbosity level."
+            help="The verbosity level. Specify multiple times to increase "
+            "verbosity. (Ignored if --log-config is specified.)"
         )
         logging_group.add_argument(
             '-f', '--log-file', dest="log_file",
-            help="File to log to."
+            help="File to log to. (Ignored if --log-config is specified.)"
        )
         logging_group.add_argument(
             '--log-config', dest="log_config", default=None,
             help="Python logging config file"
         )
+        logging_group.add_argument(
+            '-n', '--no-redirect-stdio',
+            action='store_true', default=None,
+            help="Do not redirect stdout/stderr to the log"
+        )

     def generate_files(self, config):
         log_config = config.get("log_config")
         if log_config and not os.path.exists(log_config):
+            log_file = self.abspath("homeserver.log")
             with open(log_config, "wb") as log_config_file:
                 log_config_file.write(
-                    DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
+                    DEFAULT_LOG_CONFIG.substitute(log_file=log_file)
                 )

-    def setup_logging(self):
-        setup_logging(self.log_config, self.log_file, self.verbosity)

+def setup_logging(config, use_worker_options=False):
+    """ Set up python logging
+
+    Args:
+        config (LoggingConfig | synapse.config.workers.WorkerConfig):
+            configuration data
+
+        use_worker_options (bool): True to use 'worker_log_config' and
+            'worker_log_file' options instead of 'log_config' and 'log_file'.
+    """
+    log_config = (config.worker_log_config if use_worker_options
+                  else config.log_config)
+    log_file = (config.worker_log_file if use_worker_options
+                else config.log_file)

-def setup_logging(log_config=None, log_file=None, verbosity=None):
     log_format = (
         "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
         " - %(message)s"
     )
-    if log_config is None:
+
+    if log_config is None:
+        # We don't have a logfile, so fall back to the 'verbosity' param from
+        # the config or cmdline. (Note that we generate a log config for new
+        # installs, so this will be an unusual case)
         level = logging.INFO
         level_for_storage = logging.INFO
-        if verbosity:
+        if config.verbosity:
             level = logging.DEBUG
-            if verbosity > 1:
+            if config.verbosity > 1:
                 level_for_storage = logging.DEBUG

-        # FIXME: we need a logging.WARN for a -q quiet option
         logger = logging.getLogger('')
         logger.setLevel(level)

-        logging.getLogger('synapse.storage').setLevel(level_for_storage)
+        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

         formatter = logging.Formatter(log_format)
         if log_file:
@@ -160,24 +170,50 @@ def setup_logging(log_config=None, log_file=None, verbosity=None):
                 logger.info("Closing log file due to SIGHUP")
                 handler.doRollover()
                 logger.info("Opened new log file due to SIGHUP")

-            # TODO(paul): obviously this is a terrible mechanism for
-            # stealing SIGHUP, because it means no other part of synapse
-            # can use it instead. If we want to catch SIGHUP anywhere
-            # else as well, I'd suggest we find a nicer way to broadcast
-            # it around.
-            if getattr(signal, "SIGHUP"):
-                signal.signal(signal.SIGHUP, sighup)
         else:
             handler = logging.StreamHandler()
+
+            def sighup(signum, stack):
+                pass
+
         handler.setFormatter(formatter)
+
         handler.addFilter(LoggingContextFilter(request=""))
+
         logger.addHandler(handler)
     else:
-        with open(log_config, 'r') as f:
-            logging.config.dictConfig(yaml.load(f))
+        def load_log_config():
+            with open(log_config, 'r') as f:
+                logging.config.dictConfig(yaml.load(f))

-        observer = PythonLoggingObserver()
-        observer.start()
+        def sighup(signum, stack):
+            # it might be better to use a file watcher or something for this.
+            logging.info("Reloading log config from %s due to SIGHUP",
+                         log_config)
+            load_log_config()
+
+        load_log_config()
+
+    # TODO(paul): obviously this is a terrible mechanism for
+    # stealing SIGHUP, because it means no other part of synapse
+    # can use it instead. If we want to catch SIGHUP anywhere
+    # else as well, I'd suggest we find a nicer way to broadcast
+    # it around.
+    if getattr(signal, "SIGHUP"):
+        signal.signal(signal.SIGHUP, sighup)
+
+    # It's critical to point twisted's internal logging somewhere, otherwise it
+    # stacks up and leaks up to 64K objects;
+    # see: https://twistedmatrix.com/trac/ticket/8164
+    #
+    # Routing to the python logging framework could be a performance problem if
+    # the handlers blocked for a long time as python.logging is a blocking API
+    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
+    # filed as https://github.com/matrix-org/synapse/issues/1727
|
||||||
|
#
|
||||||
|
# However this may not be too much of a problem if we are just writing to a file.
|
||||||
|
observer = STDLibLogObserver()
|
||||||
|
globalLogBeginner.beginLoggingTo(
|
||||||
|
[observer],
|
||||||
|
redirectStandardIO=not config.no_redirect_stdio,
|
||||||
|
)
|
||||||
|
|||||||
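The upshot of the hunk above is that setup_logging now takes the whole config object (so worker processes can reuse it via use_worker_options) instead of three loose parameters. A minimal sketch of the new calling convention, using an illustrative namedtuple in place of the real LoggingConfig/WorkerConfig objects:

    # Illustrative only: "FakeConfig" is a stand-in for the real config
    # objects; the attribute names mirror the ones setup_logging() reads.
    import collections

    FakeConfig = collections.namedtuple(
        "FakeConfig",
        ["log_config", "log_file", "verbosity", "no_redirect_stdio",
         "worker_log_config", "worker_log_file"],
    )

    master = FakeConfig(
        log_config=None, log_file="homeserver.log", verbosity=1,
        no_redirect_stdio=False, worker_log_config=None, worker_log_file=None,
    )
    # setup_logging(master)                           # reads log_config / log_file

    worker = master._replace(worker_log_file="worker.log")
    # setup_logging(worker, use_worker_options=True)  # reads worker_* instead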
69  synapse/config/password_auth_providers.py  Normal file
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Openmarket
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+from synapse.util.module_loader import load_module
+
+LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider'
+
+
+class PasswordAuthProviderConfig(Config):
+    def read_config(self, config):
+        self.password_providers = []
+        providers = []
+
+        # We want to be backwards compatible with the old `ldap_config`
+        # param.
+        ldap_config = config.get("ldap_config", {})
+        if ldap_config.get("enabled", False):
+            providers.append({
+                'module': LDAP_PROVIDER,
+                'config': ldap_config,
+            })
+
+        providers.extend(config.get("password_providers", []))
+        for provider in providers:
+            mod_name = provider['module']
+
+            # This is for backwards compat when the ldap auth provider resided
+            # in this package.
+            if mod_name == "synapse.util.ldap_auth_provider.LdapAuthProvider":
+                mod_name = LDAP_PROVIDER
+
+            (provider_class, provider_config) = load_module({
+                "module": mod_name,
+                "config": provider['config'],
+            })
+
+            self.password_providers.append((provider_class, provider_config))
+
+    def default_config(self, **kwargs):
+        return """\
+        # password_providers:
+        #     - module: "ldap_auth_provider.LdapAuthProvider"
+        #       config:
+        #         enabled: true
+        #         uri: "ldap://ldap.example.com:389"
+        #         start_tls: true
+        #         base: "ou=users,dc=example,dc=com"
+        #         attributes:
+        #            uid: "cn"
+        #            mail: "email"
+        #            name: "givenName"
+        #         #bind_dn:
+        #         #bind_password:
+        #         #filter: "(objectClass=posixAccount)"
+        """
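Each entry in password_providers names a module for load_module to import. For illustration, a skeleton of the kind of class such a module exports; the hook names (parse_config, check_password) follow synapse's password-provider interface, but treat the exact signatures here as assumptions of this sketch:

    # Hypothetical provider module; not part of the diff above.
    from twisted.internet import defer


    class DummyPasswordProvider(object):
        def __init__(self, config, account_handler):
            self.config = config  # whatever parse_config returned
            self.account_handler = account_handler

        @staticmethod
        def parse_config(config):
            # Validate/normalise the provider's 'config' mapping here.
            return config

        @defer.inlineCallbacks
        def check_password(self, user_id, password):
            # Accept one hard-coded credential; a real provider would call
            # out to LDAP/REST/etc. here.
            yield defer.succeed(None)  # placeholder for real I/O
            defer.returnValue(
                user_id == "@demo:example.com" and password == "hunter2"
            )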
61  synapse/config/push.py  Normal file
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class PushConfig(Config):
+    def read_config(self, config):
+        push_config = config.get("push", {})
+        self.push_include_content = push_config.get("include_content", True)
+
+        # There was a a 'redact_content' setting but mistakenly read from the
+        # 'email'section'. Check for the flag in the 'push' section, and log,
+        # but do not honour it to avoid nasty surprises when people upgrade.
+        if push_config.get("redact_content") is not None:
+            print(
+                "The push.redact_content content option has never worked. "
+                "Please set push.include_content if you want this behaviour"
+            )
+
+        # Now check for the one in the 'email' section and honour it,
+        # with a warning.
+        push_config = config.get("email", {})
+        redact_content = push_config.get("redact_content")
+        if redact_content is not None:
+            print(
+                "The 'email.redact_content' option is deprecated: "
+                "please set push.include_content instead"
+            )
+            self.push_include_content = not redact_content
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Clients requesting push notifications can either have the body of
+        # the message sent in the notification poke along with other details
+        # like the sender, or just the event ID and room ID (`event_id_only`).
+        # If clients choose the former, this option controls whether the
+        # notification request includes the content of the event (other details
+        # like the sender are still included). For `event_id_only` push, it
+        # has no effect.
+
+        # For modern android devices the notification content will still appear
+        # because it is loaded by the app. iPhone, however will send a
+        # notification saying only that a message arrived and who it came from.
+        #
+        #push:
+        #   include_content: true
+        """
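What include_content actually changes is the notification poke sent to the push gateway. Illustrative payloads, trimmed and only an approximation of the Matrix push-gateway API:

    # Hypothetical pokes; field values are placeholders.
    poke_with_content = {
        "notification": {
            "event_id": "$ev:example.com",
            "room_id": "!room:example.com",
            "sender": "@alice:example.com",
            "type": "m.room.message",
            "content": {"msgtype": "m.text", "body": "hello"},
        }
    }

    poke_without_content = {
        "notification": {
            "event_id": "$ev:example.com",
            "room_id": "!room:example.com",
            "sender": "@alice:example.com",
            "type": "m.room.message",
            # no "content" key when include_content is false
        }
    }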
@@ -31,8 +31,9 @@ class RegistrationConfig(Config):
             strtobool(str(config["disable_registration"]))
         )

+        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
+        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
         self.registration_shared_secret = config.get("registration_shared_secret")
-        self.user_creation_max_duration = int(config["user_creation_max_duration"])

         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
         self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
@@ -42,6 +43,8 @@ class RegistrationConfig(Config):
             self.allow_guest_access and config.get("invite_3pid_guest", False)
         )

+        self.auto_join_rooms = config.get("auto_join_rooms", [])
+
     def default_config(self, **kwargs):
         registration_shared_secret = random_string_with_symbols(50)

@@ -51,18 +54,32 @@ class RegistrationConfig(Config):
         # Enable registration for new users.
         enable_registration: False

+        # The user must provide all of the below types of 3PID when registering.
+        #
+        # registrations_require_3pid:
+        #     - email
+        #     - msisdn
+
+        # Mandate that users are only allowed to associate certain formats of
+        # 3PIDs with accounts on this server.
+        #
+        # allowed_local_3pids:
+        #     - medium: email
+        #       pattern: ".*@matrix\\.org"
+        #     - medium: email
+        #       pattern: ".*@vector\\.im"
+        #     - medium: msisdn
+        #       pattern: "\\+44"
+
         # If set, allows registration by anyone who also has the shared
         # secret, even if registration is otherwise disabled.
         registration_shared_secret: "%(registration_shared_secret)s"

-        # Sets the expiry for the short term user creation in
-        # milliseconds. For instance the bellow duration is two weeks
-        # in milliseconds.
-        user_creation_max_duration: 1209600000
-
         # Set the number of bcrypt rounds used to generate password hash.
         # Larger numbers increase the work factor needed to generate the hash.
-        # The default number of rounds is 12.
+        # The default number is 12 (which equates to 2^12 rounds).
+        # N.B. that increasing this will exponentially increase the time required
+        # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
         bcrypt_rounds: 12

         # Allows users to register as guests without a password/email/etc, and
@@ -75,6 +92,12 @@ class RegistrationConfig(Config):
         trusted_third_party_id_servers:
            - matrix.org
            - vector.im
+           - riot.im

+        # Users who register on this homeserver will automatically be joined
+        # to these rooms
+        #auto_join_rooms:
+        #   - "#example:example.com"
         """ % locals()

     def add_arguments(self, parser):
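The 2^rounds relationship called out in the new comment is easy to see directly with the bcrypt package (which synapse depends on for password hashing):

    # Each +1 to bcrypt_rounds doubles the work (2^rounds iterations), so
    # login and registration time grows exponentially with this setting.
    import time

    import bcrypt

    for rounds in (4, 8, 12):
        start = time.time()
        bcrypt.hashpw(b"correct horse battery staple",
                      bcrypt.gensalt(rounds=rounds))
        print("rounds=%2d -> 2^%d iterations, %.3fs"
              % (rounds, rounds, time.time() - start))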
@@ -16,6 +16,8 @@
 from ._base import Config, ConfigError
 from collections import namedtuple

+from synapse.util.module_loader import load_module
+

 MISSING_NETADDR = (
     "Missing netaddr library. This is required for URL preview API."
@@ -36,6 +38,14 @@ ThumbnailRequirement = namedtuple(
     "ThumbnailRequirement", ["width", "height", "method", "media_type"]
 )

+MediaStorageProviderConfig = namedtuple(
+    "MediaStorageProviderConfig", (
+        "store_local",  # Whether to store newly uploaded local files
+        "store_remote",  # Whether to store newly downloaded remote files
+        "store_synchronous",  # Whether to wait for successful storage for local uploads
+    ),
+)
+

 def parse_thumbnail_requirements(thumbnail_sizes):
     """ Takes a list of dictionaries with "width", "height", and "method" keys
@@ -70,7 +80,64 @@ class ContentRepositoryConfig(Config):
         self.max_upload_size = self.parse_size(config["max_upload_size"])
         self.max_image_pixels = self.parse_size(config["max_image_pixels"])
         self.max_spider_size = self.parse_size(config["max_spider_size"])

         self.media_store_path = self.ensure_directory(config["media_store_path"])

+        backup_media_store_path = config.get("backup_media_store_path")
+
+        synchronous_backup_media_store = config.get(
+            "synchronous_backup_media_store", False
+        )
+
+        storage_providers = config.get("media_storage_providers", [])
+
+        if backup_media_store_path:
+            if storage_providers:
+                raise ConfigError(
+                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
+                )
+
+            storage_providers = [{
+                "module": "file_system",
+                "store_local": True,
+                "store_synchronous": synchronous_backup_media_store,
+                "store_remote": True,
+                "config": {
+                    "directory": backup_media_store_path,
+                }
+            }]
+
+        # This is a list of config that can be used to create the storage
+        # providers. The entries are tuples of (Class, class_config,
+        # MediaStorageProviderConfig), where Class is the class of the provider,
+        # the class_config the config to pass to it, and
+        # MediaStorageProviderConfig are options for StorageProviderWrapper.
+        #
+        # We don't create the storage providers here as not all workers need
+        # them to be started.
+        self.media_storage_providers = []
+
+        for provider_config in storage_providers:
+            # We special case the module "file_system" so as not to need to
+            # expose FileStorageProviderBackend
+            if provider_config["module"] == "file_system":
+                provider_config["module"] = (
+                    "synapse.rest.media.v1.storage_provider"
+                    ".FileStorageProviderBackend"
+                )
+
+            provider_class, parsed_config = load_module(provider_config)
+
+            wrapper_config = MediaStorageProviderConfig(
+                provider_config.get("store_local", False),
+                provider_config.get("store_remote", False),
+                provider_config.get("store_synchronous", False),
+            )
+
+            self.media_storage_providers.append(
+                (provider_class, parsed_config, wrapper_config,)
+            )
+
         self.uploads_path = self.ensure_directory(config["uploads_path"])
         self.dynamic_thumbnails = config["dynamic_thumbnails"]
         self.thumbnail_requirements = parse_thumbnail_requirements(
@@ -115,6 +182,20 @@ class ContentRepositoryConfig(Config):
         # Directory where uploaded images and attachments are stored.
         media_store_path: "%(media_store)s"

+        # Media storage providers allow media to be stored in different
+        # locations.
+        # media_storage_providers:
+        # - module: file_system
+        #   # Whether to write new local files.
+        #   store_local: false
+        #   # Whether to write new remote media
+        #   store_remote: false
+        #   # Whether to block upload requests waiting for write to this
+        #   # provider to complete
+        #   store_synchronous: false
+        #   config:
+        #     directory: /mnt/some/other/directory
+
         # Directory where in-progress uploads are stored.
         uploads_path: "%(uploads_path)s"

@@ -167,6 +248,8 @@ class ContentRepositoryConfig(Config):
         # - '10.0.0.0/8'
         # - '172.16.0.0/12'
         # - '192.168.0.0/16'
+        # - '100.64.0.0/10'
+        # - '169.254.0.0/16'
         #
         # List of IP address CIDR ranges that the URL preview spider is allowed
         # to access even if they are specified in url_preview_ip_range_blacklist.
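load_module (imported at the top of this hunk) is what turns each media_storage_providers entry into a (Class, parsed_config) pair. A standalone sketch of what such a loader has to do; this is an illustration, not synapse's actual helper:

    import importlib


    def load_module_sketch(provider):
        # "module" is a dotted path ending in a class name.
        module_path, clz = provider["module"].rsplit(".", 1)
        module = importlib.import_module(module_path)
        provider_class = getattr(module, clz)

        # Give the class a chance to validate its own config block, if it
        # implements the hook.
        parse = getattr(provider_class, "parse_config", None)
        parsed_config = (parse(provider.get("config"))
                         if parse else provider.get("config"))
        return provider_class, parsed_config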
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,7 +30,41 @@ class ServerConfig(Config):
         self.user_agent_suffix = config.get("user_agent_suffix")
         self.use_frozen_dicts = config.get("use_frozen_dicts", False)
         self.public_baseurl = config.get("public_baseurl")
-        self.secondary_directory_servers = config.get("secondary_directory_servers", [])
+        self.cpu_affinity = config.get("cpu_affinity")
+
+        # Whether to send federation traffic out in this process. This only
+        # applies to some federation traffic, and so shouldn't be used to
+        # "disable" federation
+        self.send_federation = config.get("send_federation", True)
+
+        # Whether to update the user directory or not. This should be set to
+        # false only if we are updating the user directory in a worker
+        self.update_user_directory = config.get("update_user_directory", True)
+
+        # whether to enable the media repository endpoints. This should be set
+        # to false if the media repository is running as a separate endpoint;
+        # doing so ensures that we will not run cache cleanup jobs on the
+        # master, potentially causing inconsistency.
+        self.enable_media_repo = config.get("enable_media_repo", True)
+
+        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)
+
+        # Whether we should block invites sent to users on this server
+        # (other than those sent by local server admins)
+        self.block_non_admin_invites = config.get(
+            "block_non_admin_invites", False,
+        )
+
+        # FIXME: federation_domain_whitelist needs sytests
+        self.federation_domain_whitelist = None
+        federation_domain_whitelist = config.get(
+            "federation_domain_whitelist", None
+        )
+        # turn the whitelist into a hash for speed of lookup
+        if federation_domain_whitelist is not None:
+            self.federation_domain_whitelist = {}
+            for domain in federation_domain_whitelist:
+                self.federation_domain_whitelist[domain] = True
+
         if self.public_baseurl is not None:
             if self.public_baseurl[-1] != '/':
@@ -38,6 +73,15 @@ class ServerConfig(Config):

         self.listeners = config.get("listeners", [])

+        for listener in self.listeners:
+            bind_address = listener.pop("bind_address", None)
+            bind_addresses = listener.setdefault("bind_addresses", [])
+
+            if bind_address:
+                bind_addresses.append(bind_address)
+            elif not bind_addresses:
+                bind_addresses.append('')
+
         self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

         bind_port = config.get("bind_port")
@@ -50,7 +94,7 @@ class ServerConfig(Config):

             self.listeners.append({
                 "port": bind_port,
-                "bind_address": bind_host,
+                "bind_addresses": [bind_host],
                 "tls": True,
                 "type": "http",
                 "resources": [
@@ -69,7 +113,7 @@ class ServerConfig(Config):
         if unsecure_port:
             self.listeners.append({
                 "port": unsecure_port,
-                "bind_address": bind_host,
+                "bind_addresses": [bind_host],
                 "tls": False,
                 "type": "http",
                 "resources": [
@@ -88,7 +132,7 @@ class ServerConfig(Config):
         if manhole:
             self.listeners.append({
                 "port": manhole,
-                "bind_address": "127.0.0.1",
+                "bind_addresses": ["127.0.0.1"],
                 "type": "manhole",
             })

@@ -96,7 +140,7 @@ class ServerConfig(Config):
         if metrics_port:
             self.listeners.append({
                 "port": metrics_port,
-                "bind_address": config.get("metrics_bind_host", "127.0.0.1"),
+                "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                 "tls": False,
                 "type": "http",
                 "resources": [
@@ -128,9 +172,36 @@ class ServerConfig(Config):
         # When running as a daemon, the file to store the pid in
         pid_file: %(pid_file)s

+        # CPU affinity mask. Setting this restricts the CPUs on which the
+        # process will be scheduled. It is represented as a bitmask, with the
+        # lowest order bit corresponding to the first logical CPU and the
+        # highest order bit corresponding to the last logical CPU. Not all CPUs
+        # may exist on a given system but a mask may specify more CPUs than are
+        # present.
+        #
+        # For example:
+        #    0x00000001  is processor #0,
+        #    0x00000003  is processors #0 and #1,
+        #    0xFFFFFFFF  is all processors (#0 through #31).
+        #
+        # Pinning a Python process to a single CPU is desirable, because Python
+        # is inherently single-threaded due to the GIL, and can suffer a
+        # 30-40%% slowdown due to cache blow-out and thread context switching
+        # if the scheduler happens to schedule the underlying threads across
+        # different cores. See
+        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
+        #
+        # cpu_affinity: 0xFFFFFFFF
+
         # Whether to serve a web client from the HTTP/HTTPS root resource.
         web_client: True

+        # The root directory to server for the above web client.
+        # If left undefined, synapse will serve the matrix-angular-sdk web client.
+        # Make sure matrix-angular-sdk is installed with pip if web_client is True
+        # and web_client_location is undefined
+        # web_client_location: "/path/to/web/root"
+
         # The public-facing base URL for the client API (not including _matrix/...)
         # public_baseurl: https://example.com:8448/

@@ -142,13 +213,24 @@ class ServerConfig(Config):
         # The GC threshold parameters to pass to `gc.set_threshold`, if defined
         # gc_thresholds: [700, 10, 10]

-        # A list of other Home Servers to fetch the public room directory from
-        # and include in the public room directory of this home server
-        # This is a temporary stopgap solution to populate new server with a
-        # list of rooms until there exists a good solution of a decentralized
-        # room directory.
-        # secondary_directory_servers:
-        #     - matrix.org
+        # Set the limit on the returned events in the timeline in the get
+        # and sync operations. The default value is -1, means no upper limit.
+        # filter_timeline_limit: 5000
+
+        # Whether room invites to users on this server should be blocked
+        # (except those sent by local server admins). The default is False.
+        # block_non_admin_invites: True
+
+        # Restrict federation to the following whitelist of domains.
+        # N.B. we recommend also firewalling your federation listener to limit
+        # inbound federation traffic as early as possible, rather than relying
+        # purely on this application-layer restriction.  If not specified, the
+        # default is to whitelist everything.
+        #
+        # federation_domain_whitelist:
+        #  - lon.example.com
+        #  - nyc.example.com
+        #  - syd.example.com

         # List of ports that Synapse should listen on, their purpose and their
         # configuration.
@@ -159,9 +241,13 @@ class ServerConfig(Config):
          # The port to listen for HTTPS requests on.
          port: %(bind_port)s

-         # Local interface to listen on.
-         # The empty string will cause synapse to listen on all interfaces.
-         bind_address: ''
+         # Local addresses to listen on.
+         # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
+         # addresses by default. For most other OSes, this will only listen
+         # on IPv6.
+         bind_addresses:
+           - '::'
+           - '0.0.0.0'

          # This is a 'http' listener, allows us to specify 'resources'.
          type: http
@@ -188,11 +274,18 @@ class ServerConfig(Config):
            - names: [federation]  # Federation APIs
              compress: false

+        # optional list of additional endpoints which can be loaded via
+        # dynamic modules
+        # additional_resources:
+        #   "/_matrix/my/custom/endpoint":
+        #     module: my_module.CustomRequestHandler
+        #     config: {}
+
        # Unsecure HTTP listener,
        # For when matrix traffic passes through loadbalancer that unwraps TLS.
        - port: %(unsecure_port)s
          tls: false
-         bind_address: ''
+         bind_addresses: ['::', '0.0.0.0']
          type: http

          x_forwarded: false
@@ -206,7 +299,7 @@ class ServerConfig(Config):
        # Turn on the twisted ssh manhole service on localhost on the given
        # port.
        # - port: 9000
-       #   bind_address: 127.0.0.1
+       #   bind_addresses: ['::1', '127.0.0.1']
        #   type: manhole
        """ % locals()

@@ -244,7 +337,7 @@ def read_gc_thresholds(thresholds):
         return (
             int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
         )
-    except:
+    except Exception:
         raise ConfigError(
             "Value of `gc_threshold` must be a list of three integers if set"
         )
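The affinity mask documented in the new cpu_affinity comment is a plain bitmask, so translating its examples into CPU sets is mechanical. A sketch (applying the set via os.sched_setaffinity is Linux-only; synapse itself went through a helper package, so this is only illustrative):

    import os


    def mask_to_cpus(mask):
        """Expand an affinity bitmask into the set of CPU indices it names."""
        cpus, bit = set(), 0
        while mask:
            if mask & 1:
                cpus.add(bit)
            mask >>= 1
            bit += 1
        return cpus


    print(mask_to_cpus(0x00000001))  # {0}: processor #0 only
    print(mask_to_cpus(0x00000003))  # {0, 1}: processors #0 and #1
    # On Linux the set could then be applied to the current process with:
    # os.sched_setaffinity(0, mask_to_cpus(0x00000001))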
35  synapse/config/spam_checker.py  Normal file
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.module_loader import load_module
+
+from ._base import Config
+
+
+class SpamCheckerConfig(Config):
+    def read_config(self, config):
+        self.spam_checker = None
+
+        provider = config.get("spam_checker", None)
+        if provider is not None:
+            self.spam_checker = load_module(provider)
+
+    def default_config(self, **kwargs):
+        return """\
+        # spam_checker:
+        #     module: "my_custom_project.SuperSpamChecker"
+        #     config:
+        #         example_option: 'things'
+        """
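For illustration, the shape of module that spam_checker can point at. The check_event_for_spam and parse_config hooks follow synapse's spam-checker interface, but treating the event as a plain dict is a simplifying assumption of this sketch:

    # Hypothetical checker module; not part of the diff above.
    class SuperSpamChecker(object):
        def __init__(self, config):
            self.banned_words = config.get("banned_words", [])

        @staticmethod
        def parse_config(config):
            return config

        def check_event_for_spam(self, event):
            # Return True to reject the event. 'event' is treated as a plain
            # dict here for simplicity.
            body = event.get("content", {}).get("body", "")
            return any(word in body for word in self.banned_words)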
@@ -19,6 +19,9 @@ from OpenSSL import crypto
 import subprocess
 import os

+from hashlib import sha256
+from unpaddedbase64 import encode_base64
+
 GENERATE_DH_PARAMS = False


@@ -42,6 +45,19 @@ class TlsConfig(Config):
             config.get("tls_dh_params_path"), "tls_dh_params"
         )

+        self.tls_fingerprints = config["tls_fingerprints"]
+
+        # Check that our own certificate is included in the list of fingerprints
+        # and include it if it is not.
+        x509_certificate_bytes = crypto.dump_certificate(
+            crypto.FILETYPE_ASN1,
+            self.tls_certificate
+        )
+        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
+        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
+        if sha256_fingerprint not in sha256_fingerprints:
+            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
+
         # This config option applies to non-federation HTTP clients
         # (e.g. for talking to recaptcha, identity servers, and such)
         # It should never be used in production, and is intended for
@@ -73,6 +89,34 @@ class TlsConfig(Config):

         # Don't bind to the https port
         no_tls: False

+        # List of allowed TLS fingerprints for this server to publish along
+        # with the signing keys for this server. Other matrix servers that
+        # make HTTPS requests to this server will check that the TLS
+        # certificates returned by this server match one of the fingerprints.
+        #
+        # Synapse automatically adds the fingerprint of its own certificate
+        # to the list. So if federation traffic is handled directly by synapse
+        # then no modification to the list is required.
+        #
+        # If synapse is run behind a load balancer that handles the TLS then it
+        # will be necessary to add the fingerprints of the certificates used by
+        # the loadbalancers to this list if they are different to the one
+        # synapse is using.
+        #
+        # Homeservers are permitted to cache the list of TLS fingerprints
+        # returned in the key responses up to the "valid_until_ts" returned in
+        # key. It may be necessary to publish the fingerprints of a new
+        # certificate and wait until the "valid_until_ts" of the previous key
+        # responses have passed before deploying it.
+        #
+        # You can calculate a fingerprint from a given TLS listener via:
+        # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
+        #    openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
+        # or by checking matrix.org/federationtester/api/report?server_name=$host
+        #
+        tls_fingerprints: []
+        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """ % locals()

     def read_tls_certificate(self, cert_path):
@@ -88,7 +132,7 @@ class TlsConfig(Config):
         tls_private_key_path = config["tls_private_key_path"]
         tls_dh_params_path = config["tls_dh_params_path"]

-        if not os.path.exists(tls_private_key_path):
+        if not self.path_exists(tls_private_key_path):
             with open(tls_private_key_path, "w") as private_key_file:
                 tls_private_key = crypto.PKey()
                 tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
@@ -103,7 +147,7 @@ class TlsConfig(Config):
                     crypto.FILETYPE_PEM, private_key_pem
                 )

-        if not os.path.exists(tls_certificate_path):
+        if not self.path_exists(tls_certificate_path):
             with open(tls_certificate_path, "w") as certificate_file:
                 cert = crypto.X509()
                 subject = cert.get_subject()
@@ -121,7 +165,7 @@ class TlsConfig(Config):

                 certificate_file.write(cert_pem)

-        if not os.path.exists(tls_dh_params_path):
+        if not self.path_exists(tls_dh_params_path):
             if GENERATE_DH_PARAMS:
                 subprocess.check_call([
                     "openssl", "dhparam",
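The openssl pipeline quoted in the new config comment has a direct Python equivalent using the same primitives this hunk imports (the cert.pem path is illustrative):

    from hashlib import sha256

    from OpenSSL import crypto
    from unpaddedbase64 import encode_base64

    # Load a PEM certificate from disk, dump it back out as DER, then hash
    # and base64-encode without padding, exactly as read_config does above.
    with open("cert.pem") as f:  # illustrative path
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())

    der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
    print(encode_base64(sha256(der_bytes).digest()))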
44  synapse/config/user_directory.py  Normal file
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class UserDirectoryConfig(Config):
+    """User Directory Configuration
+    Configuration for the behaviour of the /user_directory API
+    """
+
+    def read_config(self, config):
+        self.user_directory_search_all_users = False
+        user_directory_config = config.get("user_directory", None)
+        if user_directory_config:
+            self.user_directory_search_all_users = (
+                user_directory_config.get("search_all_users", False)
+            )
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # User Directory configuration
+        #
+        # 'search_all_users' defines whether to search all users visible to your HS
+        # when searching the user directory, rather than limiting to users visible
+        # in public rooms.  Defaults to false.  If you set it True, you'll have to run
+        # UPDATE user_directory_stream_pos SET stream_id = NULL;
+        # on your database to tell it to rebuild the user_directory search indexes.
+        #
+        #user_directory:
+        #   search_all_users: false
+        """
@@ -19,8 +19,11 @@ class VoipConfig(Config):

     def read_config(self, config):
         self.turn_uris = config.get("turn_uris", [])
-        self.turn_shared_secret = config["turn_shared_secret"]
+        self.turn_shared_secret = config.get("turn_shared_secret")
+        self.turn_username = config.get("turn_username")
+        self.turn_password = config.get("turn_password")
         self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
+        self.turn_allow_guests = config.get("turn_allow_guests", True)

     def default_config(self, **kwargs):
         return """\
@@ -32,6 +35,18 @@ class VoipConfig(Config):
         # The shared secret used to compute passwords for the TURN server
         turn_shared_secret: "YOUR_SHARED_SECRET"

+        # The Username and password if the TURN server needs them and
+        # does not use a token
+        #turn_username: "TURNSERVER_USERNAME"
+        #turn_password: "TURNSERVER_PASSWORD"
+
         # How long generated TURN credentials last
         turn_user_lifetime: "1h"

+        # Whether guests should be allowed to use the TURN server.
+        # This defaults to True, otherwise VoIP will be unreliable for guests.
+        # However, it does introduce a slight security risk as it allows users to
+        # connect to arbitrary endpoints without having first signed up for a
+        # valid account (e.g. by passing a CAPTCHA).
+        turn_allow_guests: True
        """
|||||||
@@ -23,9 +23,37 @@ class WorkerConfig(Config):
|
|||||||
|
|
||||||
def read_config(self, config):
|
def read_config(self, config):
|
||||||
self.worker_app = config.get("worker_app")
|
self.worker_app = config.get("worker_app")
|
||||||
|
|
||||||
|
# Canonicalise worker_app so that master always has None
|
||||||
|
if self.worker_app == "synapse.app.homeserver":
|
||||||
|
self.worker_app = None
|
||||||
|
|
||||||
self.worker_listeners = config.get("worker_listeners")
|
self.worker_listeners = config.get("worker_listeners")
|
||||||
self.worker_daemonize = config.get("worker_daemonize")
|
self.worker_daemonize = config.get("worker_daemonize")
|
||||||
self.worker_pid_file = config.get("worker_pid_file")
|
self.worker_pid_file = config.get("worker_pid_file")
|
||||||
self.worker_log_file = config.get("worker_log_file")
|
self.worker_log_file = config.get("worker_log_file")
|
||||||
self.worker_log_config = config.get("worker_log_config")
|
self.worker_log_config = config.get("worker_log_config")
|
||||||
self.worker_replication_url = config.get("worker_replication_url")
|
|
||||||
|
# The host used to connect to the main synapse
|
||||||
|
self.worker_replication_host = config.get("worker_replication_host", None)
|
||||||
|
|
||||||
|
# The port on the main synapse for TCP replication
|
||||||
|
self.worker_replication_port = config.get("worker_replication_port", None)
|
||||||
|
|
||||||
|
# The port on the main synapse for HTTP replication endpoint
|
||||||
|
self.worker_replication_http_port = config.get("worker_replication_http_port")
|
||||||
|
|
||||||
|
self.worker_name = config.get("worker_name", self.worker_app)
|
||||||
|
|
||||||
|
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
|
||||||
|
self.worker_cpu_affinity = config.get("worker_cpu_affinity")
|
||||||
|
|
||||||
|
if self.worker_listeners:
|
||||||
|
for listener in self.worker_listeners:
|
||||||
|
bind_address = listener.pop("bind_address", None)
|
||||||
|
bind_addresses = listener.setdefault("bind_addresses", [])
|
||||||
|
|
||||||
|
if bind_address:
|
||||||
|
bind_addresses.append(bind_address)
|
||||||
|
elif not bind_addresses:
|
||||||
|
bind_addresses.append('')
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ class ServerContextFactory(ssl.ContextFactory):
         try:
             _ecCurve = _OpenSSLECCurve(_defaultCurveName)
             _ecCurve.addECKeyToContext(context)
-        except:
+        except Exception:
             logger.exception("Failed to enable elliptic curve for TLS")
         context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
         context.use_certificate_chain_file(config.tls_certificate_file)
@@ -32,18 +32,25 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
     """Check whether the hash for this PDU matches the contents"""
     name, expected_hash = compute_content_hash(event, hash_algorithm)
     logger.debug("Expecting hash: %s", encode_base64(expected_hash))
-    if name not in event.hashes:
+
+    # some malformed events lack a 'hashes'. Protect against it being missing
+    # or a weird type by basically treating it the same as an unhashed event.
+    hashes = event.get("hashes")
+    if not isinstance(hashes, dict):
+        raise SynapseError(400, "Malformed 'hashes'", Codes.UNAUTHORIZED)
+
+    if name not in hashes:
         raise SynapseError(
             400,
             "Algorithm %s not in hashes %s" % (
-                name, list(event.hashes),
+                name, list(hashes),
             ),
             Codes.UNAUTHORIZED,
         )
-    message_hash_base64 = event.hashes[name]
+    message_hash_base64 = hashes[name]
     try:
         message_hash_bytes = decode_base64(message_hash_base64)
-    except:
+    except Exception:
         raise SynapseError(
             400,
             "Invalid base64: %s" % (message_hash_base64,),
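compute_content_hash, which the check above relies on, hashes a canonicalised copy of the event with the volatile keys removed. A simplified sketch of that idea using the canonicaljson package; the real implementation strips a few more fields:

    import hashlib

    from canonicaljson import encode_canonical_json


    def compute_content_hash_sketch(event_dict):
        # Work on a copy: the keys removed here can legitimately differ
        # between servers, so they must not feed into the hash.
        event_dict = dict(event_dict)
        event_dict.pop("hashes", None)
        event_dict.pop("signatures", None)
        event_dict.pop("unsigned", None)
        return "sha256", hashlib.sha256(
            encode_canonical_json(event_dict)
        ).digest()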
@@ -13,14 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from synapse.util import logcontext
 from twisted.web.http import HTTPClient
 from twisted.internet.protocol import Factory
 from twisted.internet import defer, reactor
 from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util.logcontext import (
-    preserve_context_over_fn, preserve_context_over_deferred
-)
 import simplejson as json
 import logging

@@ -43,14 +40,10 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):

     for i in range(5):
         try:
-            protocol = yield preserve_context_over_fn(
-                endpoint.connect, factory
-            )
-            server_response, server_certificate = yield preserve_context_over_deferred(
-                protocol.remote_key
-            )
-            defer.returnValue((server_response, server_certificate))
-            return
+            with logcontext.PreserveLoggingContext():
+                protocol = yield endpoint.connect(factory)
+                server_response, server_certificate = yield protocol.remote_key
+                defer.returnValue((server_response, server_certificate))
         except SynapseKeyClientError as e:
             logger.exception("Error getting key for %r" % (server_name,))
             if e.status.startswith("4"):
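Stripped of the logcontext machinery (which is synapse-specific), the control flow being preserved here is a bounded retry loop that returns on the first success. A standalone sketch with a stand-in attempt callable:

    from twisted.internet import defer


    @defer.inlineCallbacks
    def fetch_with_retries(attempt, retries=5):
        # 'attempt' is a stand-in for the real connect-and-fetch step and
        # must return a Deferred; retries is assumed to be >= 1.
        last_error = None
        for _ in range(retries):
            try:
                result = yield attempt()
                defer.returnValue(result)
            except Exception as e:
                last_error = e
        raise last_error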
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,11 +16,9 @@

 from synapse.crypto.keyclient import fetch_server_key
 from synapse.api.errors import SynapseError, Codes
-from synapse.util.retryutils import get_retry_limiter
-from synapse.util import unwrapFirstError
-from synapse.util.async import ObservableDeferred
+from synapse.util import unwrapFirstError, logcontext
 from synapse.util.logcontext import (
-    preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
+    PreserveLoggingContext,
     preserve_fn
 )
 from synapse.util.metrics import Measure
@@ -58,7 +57,8 @@ Attributes:
     json_object(dict): The JSON object to verify.
     deferred(twisted.internet.defer.Deferred):
         A deferred (server_name, key_id, verify_key) tuple that resolves when
-        a verify key has been fetched
+        a verify key has been fetched. The deferreds' callbacks are run with no
+        logcontext.
 """

@@ -75,31 +75,41 @@ class Keyring(object):
         self.perspective_servers = self.config.perspectives
         self.hs = hs

+        # map from server name to Deferred. Has an entry for each server with
+        # an ongoing key download; the Deferred completes once the download
+        # completes.
+        #
+        # These are regular, logcontext-agnostic Deferreds.
         self.key_downloads = {}

     def verify_json_for_server(self, server_name, json_object):
-        return self.verify_json_objects_for_server(
-            [(server_name, json_object)]
-        )[0]
+        return logcontext.make_deferred_yieldable(
+            self.verify_json_objects_for_server(
+                [(server_name, json_object)]
+            )[0]
+        )

     def verify_json_objects_for_server(self, server_and_json):
-        """Bulk verfies signatures of json objects, bulk fetching keys as
+        """Bulk verifies signatures of json objects, bulk fetching keys as
         necessary.

         Args:
             server_and_json (list): List of pairs of (server_name, json_object)

         Returns:
-            list of deferreds indicating success or failure to verify each
-            json object's signature for the given server_name.
+            List<Deferred>: for each input pair, a deferred indicating success
+            or failure to verify each json object's signature for the given
+            server_name. The deferreds run their callbacks in the sentinel
+            logcontext.
         """
         verify_requests = []

         for server_name, json_object in server_and_json:
-            logger.debug("Verifying for %s", server_name)

             key_ids = signature_ids(json_object, server_name)
             if not key_ids:
+                logger.warn("Request from %s: no supported signature keys",
+                            server_name)
                 deferred = defer.fail(SynapseError(
                     400,
                     "Not signed with a supported algorithm",
@@ -108,97 +118,81 @@ class Keyring(object):
             else:
                 deferred = defer.Deferred()

+            logger.debug("Verifying for %s with key_ids %s",
+                         server_name, key_ids)
+
             verify_request = VerifyKeyRequest(
                 server_name, key_ids, json_object, deferred
             )

             verify_requests.append(verify_request)

-        @defer.inlineCallbacks
-        def handle_key_deferred(verify_request):
-            server_name = verify_request.server_name
-            try:
-                _, key_id, verify_key = yield verify_request.deferred
-            except IOError as e:
-                logger.warn(
-                    "Got IOError when downloading keys for %s: %s %s",
-                    server_name, type(e).__name__, str(e.message),
-                )
-                raise SynapseError(
-                    502,
-                    "Error downloading keys for %s" % (server_name,),
-                    Codes.UNAUTHORIZED,
-                )
-            except Exception as e:
-                logger.exception(
-                    "Got Exception when downloading keys for %s: %s %s",
-                    server_name, type(e).__name__, str(e.message),
-                )
-                raise SynapseError(
-                    401,
-                    "No key for %s with id %s" % (server_name, key_ids),
-                    Codes.UNAUTHORIZED,
-                )
-
-            json_object = verify_request.json_object
-
-            try:
-                verify_signed_json(json_object, server_name, verify_key)
-            except:
-                raise SynapseError(
-                    401,
-                    "Invalid signature for server %s with key %s:%s" % (
-                        server_name, verify_key.alg, verify_key.version
-                    ),
-                    Codes.UNAUTHORIZED,
-                )
-
-        server_to_deferred = {
-            server_name: defer.Deferred()
-            for server_name, _ in server_and_json
-        }
-
-        with PreserveLoggingContext():
-
-            # We want to wait for any previous lookups to complete before
-            # proceeding.
-            wait_on_deferred = self.wait_for_previous_lookups(
-                [server_name for server_name, _ in server_and_json],
-                server_to_deferred,
-            )
-
-            # Actually start fetching keys.
-            wait_on_deferred.addBoth(
-                lambda _: self.get_server_verify_keys(verify_requests)
-            )
-
-            # When we've finished fetching all the keys for a given server_name,
-            # resolve the deferred passed to `wait_for_previous_lookups` so that
-            # any lookups waiting will proceed.
-            server_to_request_ids = {}
-
-            def remove_deferreds(res, server_name, verify_request):
-                request_id = id(verify_request)
-                server_to_request_ids[server_name].discard(request_id)
-                if not server_to_request_ids[server_name]:
-                    d = server_to_deferred.pop(server_name, None)
-                    if d:
-                        d.callback(None)
-                return res
-
-            for verify_request in verify_requests:
-                server_name = verify_request.server_name
-                request_id = id(verify_request)
-                server_to_request_ids.setdefault(server_name, set()).add(request_id)
-                deferred.addBoth(remove_deferreds, server_name, verify_request)
+        preserve_fn(self._start_key_lookups)(verify_requests)

         # Pass those keys to handle_key_deferred so that the json object
         # signatures can be verified
+        handle = preserve_fn(_handle_key_deferred)
         return [
-            preserve_context_over_fn(handle_key_deferred, verify_request)
-            for verify_request in verify_requests
+            handle(rq) for rq in verify_requests
         ]

+    @defer.inlineCallbacks
+    def _start_key_lookups(self, verify_requests):
+        """Sets off the key fetches for each verify request
+
+        Once each fetch completes, verify_request.deferred will be resolved.
+
+        Args:
+            verify_requests (List[VerifyKeyRequest]):
+        """
+
+        # create a deferred for each server we're going to look up the keys
+        # for; we'll resolve them once we have completed our lookups.
+        # These will be passed into wait_for_previous_lookups to block
+        # any other lookups until we have finished.
+        # The deferreds are called with no logcontext.
+        server_to_deferred = {
+            rq.server_name: defer.Deferred()
+            for rq in verify_requests
+        }
+
+        # We want to wait for any previous lookups to complete before
+        # proceeding.
+        yield self.wait_for_previous_lookups(
+            [rq.server_name for rq in verify_requests],
+            server_to_deferred,
+        )
+
+        # Actually start fetching keys.
+        self._get_server_verify_keys(verify_requests)
+
+        # When we've finished fetching all the keys for a given server_name,
+        # resolve the deferred passed to `wait_for_previous_lookups` so that
+        # any lookups waiting will proceed.
+        #
+        # map from server name to a set of request ids
+        server_to_request_ids = {}
+
+        for verify_request in verify_requests:
+            server_name = verify_request.server_name
+            request_id = id(verify_request)
+            server_to_request_ids.setdefault(server_name, set()).add(request_id)
+
+        def remove_deferreds(res, verify_request):
+            server_name = verify_request.server_name
+            request_id = id(verify_request)
+            server_to_request_ids[server_name].discard(request_id)
|
||||||
|
if not server_to_request_ids[server_name]:
|
||||||
|
d = server_to_deferred.pop(server_name, None)
|
||||||
|
if d:
|
||||||
|
d.callback(None)
|
||||||
|
return res
|
||||||
|
|
||||||
|
for verify_request in verify_requests:
|
||||||
|
verify_request.deferred.addBoth(
|
||||||
|
remove_deferreds, verify_request,
|
||||||
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def wait_for_previous_lookups(self, server_names, server_to_deferred):
|
def wait_for_previous_lookups(self, server_names, server_to_deferred):
|
||||||
"""Waits for any previous key lookups for the given servers to finish.
|
"""Waits for any previous key lookups for the given servers to finish.
|
||||||
@@ -206,7 +200,13 @@ class Keyring(object):
|
|||||||
Args:
|
Args:
|
||||||
server_names (list): list of server_names we want to lookup
|
server_names (list): list of server_names we want to lookup
|
||||||
server_to_deferred (dict): server_name to deferred which gets
|
server_to_deferred (dict): server_name to deferred which gets
|
||||||
resolved once we've finished looking up keys for that server
|
resolved once we've finished looking up keys for that server.
|
||||||
|
The Deferreds should be regular twisted ones which call their
|
||||||
|
callbacks with no logcontext.
|
||||||
|
|
||||||
|
Returns: a Deferred which resolves once all key lookups for the given
|
||||||
|
servers have completed. Follows the synapse rules of logcontext
|
||||||
|
preservation.
|
||||||
"""
|
"""
|
||||||
while True:
|
while True:
|
||||||
wait_on = [
|
wait_on = [
|
||||||
@@ -220,19 +220,23 @@ class Keyring(object):
|
|||||||
else:
|
else:
|
||||||
break
|
break
|
||||||
|
|
||||||
|
def rm(r, server_name_):
|
||||||
|
self.key_downloads.pop(server_name_, None)
|
||||||
|
return r
|
||||||
|
|
||||||
for server_name, deferred in server_to_deferred.items():
|
for server_name, deferred in server_to_deferred.items():
|
||||||
d = ObservableDeferred(preserve_context_over_deferred(deferred))
|
self.key_downloads[server_name] = deferred
|
||||||
self.key_downloads[server_name] = d
|
deferred.addBoth(rm, server_name)
|
||||||
|
|
||||||
def rm(r, server_name):
|
def _get_server_verify_keys(self, verify_requests):
|
||||||
self.key_downloads.pop(server_name, None)
|
"""Tries to find at least one key for each verify request
|
||||||
return r
|
|
||||||
|
|
||||||
d.addBoth(rm, server_name)
|
For each verify_request, verify_request.deferred is called back with
|
||||||
|
params (server_name, key_id, VerifyKey) if a key is found, or errbacked
|
||||||
|
with a SynapseError if none of the keys are found.
|
||||||
|
|
||||||
def get_server_verify_keys(self, verify_requests):
|
Args:
|
||||||
"""Takes a dict of KeyGroups and tries to find at least one key for
|
verify_requests (list[VerifyKeyRequest]): list of verify requests
|
||||||
each group.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# These are functions that produce keys given a list of key ids
|
# These are functions that produce keys given a list of key ids
|
||||||
@@ -245,8 +249,11 @@ class Keyring(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def do_iterations():
|
def do_iterations():
|
||||||
with Measure(self.clock, "get_server_verify_keys"):
|
with Measure(self.clock, "get_server_verify_keys"):
|
||||||
|
# dict[str, dict[str, VerifyKey]]: results so far.
|
||||||
|
# map server_name -> key_id -> VerifyKey
|
||||||
merged_results = {}
|
merged_results = {}
|
||||||
|
|
||||||
|
# dict[str, set(str)]: keys to fetch for each server
|
||||||
missing_keys = {}
|
missing_keys = {}
|
||||||
for verify_request in verify_requests:
|
for verify_request in verify_requests:
|
||||||
missing_keys.setdefault(verify_request.server_name, set()).update(
|
missing_keys.setdefault(verify_request.server_name, set()).update(
|
||||||
@@ -290,33 +297,45 @@ class Keyring(object):
|
|||||||
if not missing_keys:
|
if not missing_keys:
|
||||||
break
|
break
|
||||||
|
|
||||||
for verify_request in requests_missing_keys.values():
|
with PreserveLoggingContext():
|
||||||
verify_request.deferred.errback(SynapseError(
|
for verify_request in requests_missing_keys:
|
||||||
401,
|
verify_request.deferred.errback(SynapseError(
|
||||||
"No key for %s with id %s" % (
|
401,
|
||||||
verify_request.server_name, verify_request.key_ids,
|
"No key for %s with id %s" % (
|
||||||
),
|
verify_request.server_name, verify_request.key_ids,
|
||||||
Codes.UNAUTHORIZED,
|
),
|
||||||
))
|
Codes.UNAUTHORIZED,
|
||||||
|
))
|
||||||
|
|
||||||
def on_err(err):
|
def on_err(err):
|
||||||
for verify_request in verify_requests:
|
with PreserveLoggingContext():
|
||||||
if not verify_request.deferred.called:
|
for verify_request in verify_requests:
|
||||||
verify_request.deferred.errback(err)
|
if not verify_request.deferred.called:
|
||||||
|
verify_request.deferred.errback(err)
|
||||||
|
|
||||||
do_iterations().addErrback(on_err)
|
preserve_fn(do_iterations)().addErrback(on_err)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_keys_from_store(self, server_name_and_key_ids):
|
def get_keys_from_store(self, server_name_and_key_ids):
|
||||||
res = yield defer.gatherResults(
|
"""
|
||||||
|
|
||||||
|
Args:
|
||||||
|
server_name_and_key_ids (list[(str, iterable[str])]):
|
||||||
|
list of (server_name, iterable[key_id]) tuples to fetch keys for
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
|
||||||
|
server_name -> key_id -> VerifyKey
|
||||||
|
"""
|
||||||
|
res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
self.store.get_server_verify_keys(
|
preserve_fn(self.store.get_server_verify_keys)(
|
||||||
server_name, key_ids
|
server_name, key_ids
|
||||||
).addCallback(lambda ks, server: (server, ks), server_name)
|
).addCallback(lambda ks, server: (server, ks), server_name)
|
||||||
for server_name, key_ids in server_name_and_key_ids
|
for server_name, key_ids in server_name_and_key_ids
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
defer.returnValue(dict(res))
|
defer.returnValue(dict(res))
|
||||||
|
|
||||||
@@ -337,13 +356,13 @@ class Keyring(object):
|
|||||||
)
|
)
|
||||||
defer.returnValue({})
|
defer.returnValue({})
|
||||||
|
|
||||||
results = yield defer.gatherResults(
|
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
get_key(p_name, p_keys)
|
preserve_fn(get_key)(p_name, p_keys)
|
||||||
for p_name, p_keys in self.perspective_servers.items()
|
for p_name, p_keys in self.perspective_servers.items()
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
union_of_keys = {}
|
union_of_keys = {}
|
||||||
for result in results:
|
for result in results:
|
||||||
@@ -356,40 +375,34 @@ class Keyring(object):
|
|||||||
def get_keys_from_server(self, server_name_and_key_ids):
|
def get_keys_from_server(self, server_name_and_key_ids):
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_key(server_name, key_ids):
|
def get_key(server_name, key_ids):
|
||||||
limiter = yield get_retry_limiter(
|
keys = None
|
||||||
server_name,
|
try:
|
||||||
self.clock,
|
keys = yield self.get_server_verify_key_v2_direct(
|
||||||
self.store,
|
server_name, key_ids
|
||||||
)
|
)
|
||||||
with limiter:
|
except Exception as e:
|
||||||
keys = None
|
logger.info(
|
||||||
try:
|
"Unable to get key %r for %r directly: %s %s",
|
||||||
keys = yield self.get_server_verify_key_v2_direct(
|
key_ids, server_name,
|
||||||
server_name, key_ids
|
type(e).__name__, str(e.message),
|
||||||
)
|
)
|
||||||
except Exception as e:
|
|
||||||
logger.info(
|
|
||||||
"Unable to get key %r for %r directly: %s %s",
|
|
||||||
key_ids, server_name,
|
|
||||||
type(e).__name__, str(e.message),
|
|
||||||
)
|
|
||||||
|
|
||||||
if not keys:
|
if not keys:
|
||||||
keys = yield self.get_server_verify_key_v1_direct(
|
keys = yield self.get_server_verify_key_v1_direct(
|
||||||
server_name, key_ids
|
server_name, key_ids
|
||||||
)
|
)
|
||||||
|
|
||||||
keys = {server_name: keys}
|
keys = {server_name: keys}
|
||||||
|
|
||||||
defer.returnValue(keys)
|
defer.returnValue(keys)
|
||||||
|
|
||||||
results = yield defer.gatherResults(
|
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
get_key(server_name, key_ids)
|
preserve_fn(get_key)(server_name, key_ids)
|
||||||
for server_name, key_ids in server_name_and_key_ids
|
for server_name, key_ids in server_name_and_key_ids
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
merged = {}
|
merged = {}
|
||||||
for result in results:
|
for result in results:
|
||||||
@@ -466,9 +479,9 @@ class Keyring(object):
|
|||||||
for server_name, response_keys in processed_response.items():
|
for server_name, response_keys in processed_response.items():
|
||||||
keys.setdefault(server_name, {}).update(response_keys)
|
keys.setdefault(server_name, {}).update(response_keys)
|
||||||
|
|
||||||
yield defer.gatherResults(
|
yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
self.store_keys(
|
preserve_fn(self.store_keys)(
|
||||||
server_name=server_name,
|
server_name=server_name,
|
||||||
from_server=perspective_name,
|
from_server=perspective_name,
|
||||||
verify_keys=response_keys,
|
verify_keys=response_keys,
|
||||||
@@ -476,7 +489,7 @@ class Keyring(object):
|
|||||||
for server_name, response_keys in keys.items()
|
for server_name, response_keys in keys.items()
|
||||||
],
|
],
|
||||||
consumeErrors=True
|
consumeErrors=True
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
defer.returnValue(keys)
|
defer.returnValue(keys)
|
||||||
|
|
||||||
@@ -524,7 +537,7 @@ class Keyring(object):
|
|||||||
|
|
||||||
keys.update(response_keys)
|
keys.update(response_keys)
|
||||||
|
|
||||||
yield defer.gatherResults(
|
yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
preserve_fn(self.store_keys)(
|
preserve_fn(self.store_keys)(
|
||||||
server_name=key_server_name,
|
server_name=key_server_name,
|
||||||
@@ -534,7 +547,7 @@ class Keyring(object):
|
|||||||
for key_server_name, verify_keys in keys.items()
|
for key_server_name, verify_keys in keys.items()
|
||||||
],
|
],
|
||||||
consumeErrors=True
|
consumeErrors=True
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
defer.returnValue(keys)
|
defer.returnValue(keys)
|
||||||
|
|
||||||
@@ -600,7 +613,7 @@ class Keyring(object):
|
|||||||
response_keys.update(verify_keys)
|
response_keys.update(verify_keys)
|
||||||
response_keys.update(old_verify_keys)
|
response_keys.update(old_verify_keys)
|
||||||
|
|
||||||
yield defer.gatherResults(
|
yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
preserve_fn(self.store.store_server_keys_json)(
|
preserve_fn(self.store.store_server_keys_json)(
|
||||||
server_name=server_name,
|
server_name=server_name,
|
||||||
@@ -613,7 +626,7 @@ class Keyring(object):
|
|||||||
for key_id in updated_key_ids
|
for key_id in updated_key_ids
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
results[server_name] = response_keys
|
results[server_name] = response_keys
|
||||||
|
|
||||||
@@ -691,7 +704,6 @@ class Keyring(object):
|
|||||||
|
|
||||||
defer.returnValue(verify_keys)
|
defer.returnValue(verify_keys)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def store_keys(self, server_name, from_server, verify_keys):
|
def store_keys(self, server_name, from_server, verify_keys):
|
||||||
"""Store a collection of verify keys for a given server
|
"""Store a collection of verify keys for a given server
|
||||||
Args:
|
Args:
|
||||||
@@ -702,7 +714,7 @@ class Keyring(object):
|
|||||||
A deferred that completes when the keys are stored.
|
A deferred that completes when the keys are stored.
|
||||||
"""
|
"""
|
||||||
# TODO(markjh): Store whether the keys have expired.
|
# TODO(markjh): Store whether the keys have expired.
|
||||||
yield defer.gatherResults(
|
return logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
preserve_fn(self.store.store_server_verify_key)(
|
preserve_fn(self.store.store_server_verify_key)(
|
||||||
server_name, server_name, key.time_added, key
|
server_name, server_name, key.time_added, key
|
||||||
@@ -710,4 +722,48 @@ class Keyring(object):
|
|||||||
for key_id, key in verify_keys.items()
|
for key_id, key in verify_keys.items()
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _handle_key_deferred(verify_request):
|
||||||
|
server_name = verify_request.server_name
|
||||||
|
try:
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
_, key_id, verify_key = yield verify_request.deferred
|
||||||
|
except IOError as e:
|
||||||
|
logger.warn(
|
||||||
|
"Got IOError when downloading keys for %s: %s %s",
|
||||||
|
server_name, type(e).__name__, str(e.message),
|
||||||
|
)
|
||||||
|
raise SynapseError(
|
||||||
|
502,
|
||||||
|
"Error downloading keys for %s" % (server_name,),
|
||||||
|
Codes.UNAUTHORIZED,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.exception(
|
||||||
|
"Got Exception when downloading keys for %s: %s %s",
|
||||||
|
server_name, type(e).__name__, str(e.message),
|
||||||
|
)
|
||||||
|
raise SynapseError(
|
||||||
|
401,
|
||||||
|
"No key for %s with id %s" % (server_name, verify_request.key_ids),
|
||||||
|
Codes.UNAUTHORIZED,
|
||||||
|
)
|
||||||
|
|
||||||
|
json_object = verify_request.json_object
|
||||||
|
|
||||||
|
logger.debug("Got key %s %s:%s for server %s, verifying" % (
|
||||||
|
key_id, verify_key.alg, verify_key.version, server_name,
|
||||||
|
))
|
||||||
|
try:
|
||||||
|
verify_signed_json(json_object, server_name, verify_key)
|
||||||
|
except Exception:
|
||||||
|
raise SynapseError(
|
||||||
|
401,
|
||||||
|
"Invalid signature for server %s with key %s:%s" % (
|
||||||
|
server_name, verify_key.alg, verify_key.version
|
||||||
|
),
|
||||||
|
Codes.UNAUTHORIZED,
|
||||||
|
)
|
||||||
|
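
Editor's note: the recurring rewrite in this file — wrapping each defer.gatherResults in logcontext.make_deferred_yieldable and each callee in preserve_fn — is Synapse's standard way of fanning out work without losing the caller's logcontext. A minimal sketch of the pattern as read from the diff; fetch_one and fetch_all are illustrative names, not functions from this changeset:

    from twisted.internet import defer

    from synapse.util import logcontext
    from synapse.util.logcontext import preserve_fn


    @defer.inlineCallbacks
    def fetch_all(fetch_one, items):
        # preserve_fn() runs each call in its own copy of the current
        # logcontext and returns a deferred that follows the sentinel
        # rules, so the results can safely be gathered together and
        # yielded on without corrupting the request's logging context.
        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [preserve_fn(fetch_one)(item) for item in items],
            consumeErrors=True,
        ))
        defer.returnValue(results)
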
678  synapse/event_auth.py  Normal file
@@ -0,0 +1,678 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 - 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from canonicaljson import encode_canonical_json
+from signedjson.key import decode_verify_key_bytes
+from signedjson.sign import verify_signed_json, SignatureVerifyException
+from unpaddedbase64 import decode_base64
+
+from synapse.api.constants import EventTypes, Membership, JoinRules
+from synapse.api.errors import AuthError, SynapseError, EventSizeError
+from synapse.types import UserID, get_domain_from_id
+
+logger = logging.getLogger(__name__)
+
+
+def check(event, auth_events, do_sig_check=True, do_size_check=True):
+    """ Checks if this event is correctly authed.
+
+    Args:
+        event: the event being checked.
+        auth_events (dict: event-key -> event): the existing room state.
+
+
+    Returns:
+        True if the auth checks pass.
+    """
+    if do_size_check:
+        _check_size_limits(event)
+
+    if not hasattr(event, "room_id"):
+        raise AuthError(500, "Event has no room_id: %s" % event)
+
+    if do_sig_check:
+        sender_domain = get_domain_from_id(event.sender)
+        event_id_domain = get_domain_from_id(event.event_id)
+
+        is_invite_via_3pid = (
+            event.type == EventTypes.Member
+            and event.membership == Membership.INVITE
+            and "third_party_invite" in event.content
+        )
+
+        # Check the sender's domain has signed the event
+        if not event.signatures.get(sender_domain):
+            # We allow invites via 3pid to have a sender from a different
+            # HS, as the sender must match the sender of the original
+            # 3pid invite. This is checked further down with the
+            # other dedicated membership checks.
+            if not is_invite_via_3pid:
+                raise AuthError(403, "Event not signed by sender's server")
+
+        # Check the event_id's domain has signed the event
+        if not event.signatures.get(event_id_domain):
+            raise AuthError(403, "Event not signed by sending server")
+
+    if auth_events is None:
+        # Oh, we don't know what the state of the room was, so we
+        # are trusting that this is allowed (at least for now)
+        logger.warn("Trusting event: %s", event.event_id)
+        return True
+
+    if event.type == EventTypes.Create:
+        room_id_domain = get_domain_from_id(event.room_id)
+        if room_id_domain != sender_domain:
+            raise AuthError(
+                403,
+                "Creation event's room_id domain does not match sender's"
+            )
+        # FIXME
+        return True
+
+    creation_event = auth_events.get((EventTypes.Create, ""), None)
+
+    if not creation_event:
+        raise SynapseError(
+            403,
+            "Room %r does not exist" % (event.room_id,)
+        )
+
+    creating_domain = get_domain_from_id(event.room_id)
+    originating_domain = get_domain_from_id(event.sender)
+    if creating_domain != originating_domain:
+        if not _can_federate(event, auth_events):
+            raise AuthError(
+                403,
+                "This room has been marked as unfederatable."
+            )
+
+    # FIXME: Temp hack
+    if event.type == EventTypes.Aliases:
+        if not event.is_state():
+            raise AuthError(
+                403,
+                "Alias event must be a state event",
+            )
+        if not event.state_key:
+            raise AuthError(
+                403,
+                "Alias event must have non-empty state_key"
+            )
+        sender_domain = get_domain_from_id(event.sender)
+        if event.state_key != sender_domain:
+            raise AuthError(
+                403,
+                "Alias event's state_key does not match sender's domain"
+            )
+        return True
+
+    if logger.isEnabledFor(logging.DEBUG):
+        logger.debug(
+            "Auth events: %s",
+            [a.event_id for a in auth_events.values()]
+        )
+
+    if event.type == EventTypes.Member:
+        allowed = _is_membership_change_allowed(
+            event, auth_events
+        )
+        if allowed:
+            logger.debug("Allowing! %s", event)
+        else:
+            logger.debug("Denying! %s", event)
+        return allowed
+
+    _check_event_sender_in_room(event, auth_events)
+
+    # Special case to allow m.room.third_party_invite events wherever
+    # a user is allowed to issue invites. Fixes
+    # https://github.com/vector-im/vector-web/issues/1208 hopefully
+    if event.type == EventTypes.ThirdPartyInvite:
+        user_level = get_user_power_level(event.user_id, auth_events)
+        invite_level = _get_named_level(auth_events, "invite", 0)
+
+        if user_level < invite_level:
+            raise AuthError(
+                403, (
+                    "You cannot issue a third party invite for %s." %
+                    (event.content.display_name,)
+                )
+            )
+        else:
+            return True
+
+    _can_send_event(event, auth_events)
+
+    if event.type == EventTypes.PowerLevels:
+        _check_power_levels(event, auth_events)
+
+    if event.type == EventTypes.Redaction:
+        check_redaction(event, auth_events)
+
+    logger.debug("Allowing! %s", event)
+
+
+def _check_size_limits(event):
+    def too_big(field):
+        raise EventSizeError("%s too large" % (field,))
+
+    if len(event.user_id) > 255:
+        too_big("user_id")
+    if len(event.room_id) > 255:
+        too_big("room_id")
+    if event.is_state() and len(event.state_key) > 255:
+        too_big("state_key")
+    if len(event.type) > 255:
+        too_big("type")
+    if len(event.event_id) > 255:
+        too_big("event_id")
+    if len(encode_canonical_json(event.get_pdu_json())) > 65536:
+        too_big("event")
+
+
+def _can_federate(event, auth_events):
+    creation_event = auth_events.get((EventTypes.Create, ""))
+
+    return creation_event.content.get("m.federate", True) is True
+
+
+def _is_membership_change_allowed(event, auth_events):
+    membership = event.content["membership"]
+
+    # Check if this is the room creator joining:
+    if len(event.prev_events) == 1 and Membership.JOIN == membership:
+        # Get room creation event:
+        key = (EventTypes.Create, "", )
+        create = auth_events.get(key)
+        if create and event.prev_events[0][0] == create.event_id:
+            if create.content["creator"] == event.state_key:
+                return True
+
+    target_user_id = event.state_key
+
+    creating_domain = get_domain_from_id(event.room_id)
+    target_domain = get_domain_from_id(target_user_id)
+    if creating_domain != target_domain:
+        if not _can_federate(event, auth_events):
+            raise AuthError(
+                403,
+                "This room has been marked as unfederatable."
+            )
+
+    # get info about the caller
+    key = (EventTypes.Member, event.user_id, )
+    caller = auth_events.get(key)
+
+    caller_in_room = caller and caller.membership == Membership.JOIN
+    caller_invited = caller and caller.membership == Membership.INVITE
+
+    # get info about the target
+    key = (EventTypes.Member, target_user_id, )
+    target = auth_events.get(key)
+
+    target_in_room = target and target.membership == Membership.JOIN
+    target_banned = target and target.membership == Membership.BAN
+
+    key = (EventTypes.JoinRules, "", )
+    join_rule_event = auth_events.get(key)
+    if join_rule_event:
+        join_rule = join_rule_event.content.get(
+            "join_rule", JoinRules.INVITE
+        )
+    else:
+        join_rule = JoinRules.INVITE
+
+    user_level = get_user_power_level(event.user_id, auth_events)
+    target_level = get_user_power_level(
+        target_user_id, auth_events
+    )
+
+    # FIXME (erikj): What should we do here as the default?
+    ban_level = _get_named_level(auth_events, "ban", 50)
+
+    logger.debug(
+        "_is_membership_change_allowed: %s",
+        {
+            "caller_in_room": caller_in_room,
+            "caller_invited": caller_invited,
+            "target_banned": target_banned,
+            "target_in_room": target_in_room,
+            "membership": membership,
+            "join_rule": join_rule,
+            "target_user_id": target_user_id,
+            "event.user_id": event.user_id,
+        }
+    )
+
+    if Membership.INVITE == membership and "third_party_invite" in event.content:
+        if not _verify_third_party_invite(event, auth_events):
+            raise AuthError(403, "You are not invited to this room.")
+        if target_banned:
+            raise AuthError(
+                403, "%s is banned from the room" % (target_user_id,)
+            )
+        return True
+
+    if Membership.JOIN != membership:
+        if (caller_invited
+                and Membership.LEAVE == membership
+                and target_user_id == event.user_id):
+            return True
+
+        if not caller_in_room:  # caller isn't joined
+            raise AuthError(
+                403,
+                "%s not in room %s." % (event.user_id, event.room_id,)
+            )
+
+    if Membership.INVITE == membership:
+        # TODO (erikj): We should probably handle this more intelligently
+        # PRIVATE join rules.
+
+        # Invites are valid iff caller is in the room and target isn't.
+        if target_banned:
+            raise AuthError(
+                403, "%s is banned from the room" % (target_user_id,)
+            )
+        elif target_in_room:  # the target is already in the room.
+            raise AuthError(403, "%s is already in the room." %
+                                 target_user_id)
+        else:
+            invite_level = _get_named_level(auth_events, "invite", 0)
+
+            if user_level < invite_level:
+                raise AuthError(
+                    403, "You cannot invite user %s." % target_user_id
+                )
+    elif Membership.JOIN == membership:
+        # Joins are valid iff caller == target and they were:
+        # invited: They are accepting the invitation
+        # joined: It's a NOOP
+        if event.user_id != target_user_id:
+            raise AuthError(403, "Cannot force another user to join.")
+        elif target_banned:
+            raise AuthError(403, "You are banned from this room")
+        elif join_rule == JoinRules.PUBLIC:
+            pass
+        elif join_rule == JoinRules.INVITE:
+            if not caller_in_room and not caller_invited:
+                raise AuthError(403, "You are not invited to this room.")
+        else:
+            # TODO (erikj): may_join list
+            # TODO (erikj): private rooms
+            raise AuthError(403, "You are not allowed to join this room")
+    elif Membership.LEAVE == membership:
+        # TODO (erikj): Implement kicks.
+        if target_banned and user_level < ban_level:
+            raise AuthError(
+                403, "You cannot unban user %s." % (target_user_id,)
+            )
+        elif target_user_id != event.user_id:
+            kick_level = _get_named_level(auth_events, "kick", 50)
+
+            if user_level < kick_level or user_level <= target_level:
+                raise AuthError(
+                    403, "You cannot kick user %s." % target_user_id
+                )
+    elif Membership.BAN == membership:
+        if user_level < ban_level or user_level <= target_level:
+            raise AuthError(403, "You don't have permission to ban")
+    else:
+        raise AuthError(500, "Unknown membership %s" % membership)
+
+    return True
+
+
+def _check_event_sender_in_room(event, auth_events):
+    key = (EventTypes.Member, event.user_id, )
+    member_event = auth_events.get(key)
+
+    return _check_joined_room(
+        member_event,
+        event.user_id,
+        event.room_id
+    )
+
+
+def _check_joined_room(member, user_id, room_id):
+    if not member or member.membership != Membership.JOIN:
+        raise AuthError(403, "User %s not in room %s (%s)" % (
+            user_id, room_id, repr(member)
+        ))
+
+
+def get_send_level(etype, state_key, auth_events):
+    key = (EventTypes.PowerLevels, "", )
+    send_level_event = auth_events.get(key)
+    send_level = None
+    if send_level_event:
+        send_level = send_level_event.content.get("events", {}).get(
+            etype
+        )
+        if send_level is None:
+            if state_key is not None:
+                send_level = send_level_event.content.get(
+                    "state_default", 50
+                )
+            else:
+                send_level = send_level_event.content.get(
+                    "events_default", 0
+                )
+
+    if send_level:
+        send_level = int(send_level)
+    else:
+        send_level = 0
+
+    return send_level
+
+
+def _can_send_event(event, auth_events):
+    send_level = get_send_level(
+        event.type, event.get("state_key", None), auth_events
+    )
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    if user_level < send_level:
+        raise AuthError(
+            403,
+            "You don't have permission to post that to the room. " +
+            "user_level (%d) < send_level (%d)" % (user_level, send_level)
+        )
+
+    # Check state_key
+    if hasattr(event, "state_key"):
+        if event.state_key.startswith("@"):
+            if event.state_key != event.user_id:
+                raise AuthError(
+                    403,
+                    "You are not allowed to set others state"
+                )
+
+    return True
+
+
+def check_redaction(event, auth_events):
+    """Check whether the event sender is allowed to redact the target event.
+
+    Returns:
+        True if the the sender is allowed to redact the target event if the
+        target event was created by them.
+        False if the sender is allowed to redact the target event with no
+        further checks.
+
+    Raises:
+        AuthError if the event sender is definitely not allowed to redact
+        the target event.
+    """
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    redact_level = _get_named_level(auth_events, "redact", 50)
+
+    if user_level >= redact_level:
+        return False
+
+    redacter_domain = get_domain_from_id(event.event_id)
+    redactee_domain = get_domain_from_id(event.redacts)
+    if redacter_domain == redactee_domain:
+        return True
+
+    raise AuthError(
+        403,
+        "You don't have permission to redact events"
+    )
+
+
+def _check_power_levels(event, auth_events):
+    user_list = event.content.get("users", {})
+    # Validate users
+    for k, v in user_list.items():
+        try:
+            UserID.from_string(k)
+        except Exception:
+            raise SynapseError(400, "Not a valid user_id: %s" % (k,))
+
+        try:
+            int(v)
+        except Exception:
+            raise SynapseError(400, "Not a valid power level: %s" % (v,))
+
+    key = (event.type, event.state_key, )
+    current_state = auth_events.get(key)
+
+    if not current_state:
+        return
+
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    # Check other levels:
+    levels_to_check = [
+        ("users_default", None),
+        ("events_default", None),
+        ("state_default", None),
+        ("ban", None),
+        ("redact", None),
+        ("kick", None),
+        ("invite", None),
+    ]
+
+    old_list = current_state.content.get("users", {})
+    for user in set(old_list.keys() + user_list.keys()):
+        levels_to_check.append(
+            (user, "users")
+        )
+
+    old_list = current_state.content.get("events", {})
+    new_list = event.content.get("events", {})
+    for ev_id in set(old_list.keys() + new_list.keys()):
+        levels_to_check.append(
+            (ev_id, "events")
+        )
+
+    old_state = current_state.content
+    new_state = event.content
+
+    for level_to_check, dir in levels_to_check:
+        old_loc = old_state
+        new_loc = new_state
+        if dir:
+            old_loc = old_loc.get(dir, {})
+            new_loc = new_loc.get(dir, {})
+
+        if level_to_check in old_loc:
+            old_level = int(old_loc[level_to_check])
+        else:
+            old_level = None
+
+        if level_to_check in new_loc:
+            new_level = int(new_loc[level_to_check])
+        else:
+            new_level = None
+
+        if new_level is not None and old_level is not None:
+            if new_level == old_level:
+                continue
+
+        if dir == "users" and level_to_check != event.user_id:
+            if old_level == user_level:
+                raise AuthError(
+                    403,
+                    "You don't have permission to remove ops level equal "
+                    "to your own"
+                )
+
+        if old_level > user_level or new_level > user_level:
+            raise AuthError(
+                403,
+                "You don't have permission to add ops level greater "
+                "than your own"
+            )
+
+
+def _get_power_level_event(auth_events):
+    key = (EventTypes.PowerLevels, "", )
+    return auth_events.get(key)
+
+
+def get_user_power_level(user_id, auth_events):
+    power_level_event = _get_power_level_event(auth_events)
+
+    if power_level_event:
+        level = power_level_event.content.get("users", {}).get(user_id)
+        if not level:
+            level = power_level_event.content.get("users_default", 0)
+
+        if level is None:
+            return 0
+        else:
+            return int(level)
+    else:
+        key = (EventTypes.Create, "", )
+        create_event = auth_events.get(key)
+        if (create_event is not None and
+                create_event.content["creator"] == user_id):
+            return 100
+        else:
+            return 0
+
+
+def _get_named_level(auth_events, name, default):
+    power_level_event = _get_power_level_event(auth_events)
+
+    if not power_level_event:
+        return default
+
+    level = power_level_event.content.get(name, None)
+    if level is not None:
+        return int(level)
+    else:
+        return default
+
+
+def _verify_third_party_invite(event, auth_events):
+    """
+    Validates that the invite event is authorized by a previous third-party invite.
+
+    Checks that the public key, and keyserver, match those in the third party invite,
+    and that the invite event has a signature issued using that public key.
+
+    Args:
+        event: The m.room.member join event being validated.
+        auth_events: All relevant previous context events which may be used
+            for authorization decisions.
+
+    Return:
+        True if the event fulfills the expectations of a previous third party
+        invite event.
+    """
+    if "third_party_invite" not in event.content:
+        return False
+    if "signed" not in event.content["third_party_invite"]:
+        return False
+    signed = event.content["third_party_invite"]["signed"]
+    for key in {"mxid", "token"}:
+        if key not in signed:
+            return False
+
+    token = signed["token"]
+
+    invite_event = auth_events.get(
+        (EventTypes.ThirdPartyInvite, token,)
+    )
+    if not invite_event:
+        return False
+
+    if invite_event.sender != event.sender:
+        return False
+
+    if event.user_id != invite_event.user_id:
+        return False
+
+    if signed["mxid"] != event.state_key:
+        return False
+    if signed["token"] != token:
+        return False
+
+    for public_key_object in get_public_keys(invite_event):
+        public_key = public_key_object["public_key"]
+        try:
+            for server, signature_block in signed["signatures"].items():
+                for key_name, encoded_signature in signature_block.items():
+                    if not key_name.startswith("ed25519:"):
+                        continue
+                    verify_key = decode_verify_key_bytes(
+                        key_name,
+                        decode_base64(public_key)
+                    )
+                    verify_signed_json(signed, server, verify_key)
+
+                    # We got the public key from the invite, so we know that the
+                    # correct server signed the signed bundle.
+                    # The caller is responsible for checking that the signing
+                    # server has not revoked that public key.
+                    return True
+        except (KeyError, SignatureVerifyException,):
+            continue
+    return False
+
+
+def get_public_keys(invite_event):
+    public_keys = []
+    if "public_key" in invite_event.content:
+        o = {
+            "public_key": invite_event.content["public_key"],
+        }
+        if "key_validity_url" in invite_event.content:
+            o["key_validity_url"] = invite_event.content["key_validity_url"]
+        public_keys.append(o)
+    public_keys.extend(invite_event.content.get("public_keys", []))
+    return public_keys
+
+
+def auth_types_for_event(event):
+    """Given an event, return a list of (EventType, StateKey) that may be
+    needed to auth the event. The returned list may be a superset of what
+    would actually be required depending on the full state of the room.
+
+    Used to limit the number of events to fetch from the database to
+    actually auth the event.
+    """
+    if event.type == EventTypes.Create:
+        return []
+
+    auth_types = []
+
+    auth_types.append((EventTypes.PowerLevels, "", ))
+    auth_types.append((EventTypes.Member, event.user_id, ))
+    auth_types.append((EventTypes.Create, "", ))
+
+    if event.type == EventTypes.Member:
+        membership = event.content["membership"]
+        if membership in [Membership.JOIN, Membership.INVITE]:
+            auth_types.append((EventTypes.JoinRules, "", ))
+
+        auth_types.append((EventTypes.Member, event.state_key, ))
+
+        if membership == Membership.INVITE:
+            if "third_party_invite" in event.content:
+                key = (
+                    EventTypes.ThirdPartyInvite,
+                    event.content["third_party_invite"]["signed"]["token"]
+                )
+                auth_types.append(key)
+
+    return auth_types
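
Editor's note: since the new module is pure functions over an event and a (type, state_key) -> event map, it can be exercised without a running homeserver. A hedged usage sketch; is_event_allowed and its arguments are invented for illustration, only event_auth.check and AuthError come from the code above:

    from synapse import event_auth
    from synapse.api.errors import AuthError


    def is_event_allowed(event, state_events):
        # Build the (event_type, state_key) -> event map that check()
        # and the helpers above expect.
        auth_events = {
            (e.type, e.state_key): e for e in state_events
        }
        try:
            event_auth.check(event, auth_events, do_sig_check=False)
            return True
        except AuthError:
            return False
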
synapse/events/__init__.py
@@ -36,6 +36,15 @@ class _EventInternalMetadata(object):
    def is_invite_from_remote(self):
        return getattr(self, "invite_from_remote", False)

+    def get_send_on_behalf_of(self):
+        """Whether this server should send the event on behalf of another server.
+        This is used by the federation "send_join" API to forward the initial join
+        event for a server in the room.
+
+        returns a str with the name of the server this event is sent on behalf of.
+        """
+        return getattr(self, "send_on_behalf_of", None)
+

 def _event_dict_property(key):
    def getter(self):
@@ -70,7 +79,6 @@ class EventBase(object):
    auth_events = _event_dict_property("auth_events")
    depth = _event_dict_property("depth")
    content = _event_dict_property("content")
-    event_id = _event_dict_property("event_id")
    hashes = _event_dict_property("hashes")
    origin = _event_dict_property("origin")
    origin_server_ts = _event_dict_property("origin_server_ts")
@@ -79,8 +87,6 @@ class EventBase(object):
    redacts = _event_dict_property("redacts")
    room_id = _event_dict_property("room_id")
    sender = _event_dict_property("sender")
-    state_key = _event_dict_property("state_key")
-    type = _event_dict_property("type")
    user_id = _event_dict_property("sender")

    @property
@@ -99,7 +105,7 @@ class EventBase(object):

        return d

-    def get(self, key, default):
+    def get(self, key, default=None):
        return self._event_dict.get(key, default)

    def get_internal_metadata_dict(self):
@@ -153,6 +159,11 @@ class FrozenEvent(EventBase):
        else:
            frozen_dict = event_dict

+        self.event_id = event_dict["event_id"]
+        self.type = event_dict["type"]
+        if "state_key" in event_dict:
+            self.state_key = event_dict["state_key"]
+
        super(FrozenEvent, self).__init__(
            frozen_dict,
            signatures=signatures,
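
Editor's note: these attribute moves all lean on the _event_dict_property descriptor factory, which maps a class attribute onto one key of the event's backing dict. A simplified, self-contained sketch of the idea (DemoEvent is hypothetical; the real factory in this file also supports deletion):

    def _event_dict_property(key):
        # A property backed by self._event_dict[key]. Moving event_id, type
        # and state_key off EventBase (above) lets EventBuilder re-add them
        # as dict-backed properties while FrozenEvent stores them as plain
        # attributes.
        def getter(self):
            return self._event_dict[key]

        def setter(self, v):
            self._event_dict[key] = v

        return property(getter, setter)


    class DemoEvent(object):
        room_id = _event_dict_property("room_id")

        def __init__(self, event_dict):
            self._event_dict = event_dict


    ev = DemoEvent({"room_id": "!room:example.com"})
    assert ev.room_id == "!room:example.com"
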
synapse/events/builder.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from . import EventBase, FrozenEvent
+from . import EventBase, FrozenEvent, _event_dict_property

 from synapse.types import EventID

@@ -34,6 +34,10 @@ class EventBuilder(EventBase):
            internal_metadata_dict=internal_metadata_dict,
        )

+    event_id = _event_dict_property("event_id")
+    state_key = _event_dict_property("state_key")
+    type = _event_dict_property("type")
+
    def build(self):
        return FrozenEvent.from_event(self)

@@ -51,7 +55,7 @@ class EventBuilderFactory(object):

        local_part = str(int(self.clock.time())) + i + random_string(5)

-        e_id = EventID.create(local_part, self.hostname)
+        e_id = EventID(local_part, self.hostname)

        return e_id.to_string()
synapse/events/snapshot.py
@@ -13,11 +13,160 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from twisted.internet import defer
+
+from frozendict import frozendict
+
+
 class EventContext(object):
+    """
+    Attributes:
+        current_state_ids (dict[(str, str), str]):
+            The current state map including the current event.
+            (type, state_key) -> event_id
+
+        prev_state_ids (dict[(str, str), str]):
+            The current state map excluding the current event.
+            (type, state_key) -> event_id
+
+        state_group (int|None): state group id, if the state has been stored
+            as a state group. This is usually only None if e.g. the event is
+            an outlier.
+        rejected (bool|str): A rejection reason if the event was rejected, else
+            False
+
+        push_actions (list[(str, list[object])]): list of (user_id, actions)
+            tuples
+
+        prev_group (int): Previously persisted state group. ``None`` for an
+            outlier.
+        delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
+            (type, state_key) -> event_id. ``None`` for an outlier.
+
+        prev_state_events (?): XXX: is this ever set to anything other than
+            the empty list?
+    """
+
+    __slots__ = [
+        "current_state_ids",
+        "prev_state_ids",
+        "state_group",
+        "rejected",
+        "prev_group",
+        "delta_ids",
+        "prev_state_events",
+        "app_service",
+    ]
+
-    def __init__(self, current_state=None):
-        self.current_state = current_state
+    def __init__(self):
+        # The current state including the current event
+        self.current_state_ids = None
+        # The current state excluding the current event
+        self.prev_state_ids = None
        self.state_group = None

        self.rejected = False
-        self.push_actions = []
+
+        # A previously persisted state group and a delta between that
+        # and this state.
+        self.prev_group = None
+        self.delta_ids = None
+
+        self.prev_state_events = None
+
+        self.app_service = None
+
+    def serialize(self, event):
+        """Converts self to a type that can be serialized as JSON, and then
+        deserialized by `deserialize`
+
+        Args:
+            event (FrozenEvent): The event that this context relates to
+
+        Returns:
+            dict
+        """
+
+        # We don't serialize the full state dicts, instead they get pulled out
+        # of the DB on the other side. However, the other side can't figure out
+        # the prev_state_ids, so if we're a state event we include the event
+        # id that we replaced in the state.
+        if event.is_state():
+            prev_state_id = self.prev_state_ids.get((event.type, event.state_key))
+        else:
+            prev_state_id = None
+
+        return {
+            "prev_state_id": prev_state_id,
+            "event_type": event.type,
+            "event_state_key": event.state_key if event.is_state() else None,
+            "state_group": self.state_group,
+            "rejected": self.rejected,
+            "prev_group": self.prev_group,
+            "delta_ids": _encode_state_dict(self.delta_ids),
+            "prev_state_events": self.prev_state_events,
+            "app_service_id": self.app_service.id if self.app_service else None
+        }
+
+    @staticmethod
+    @defer.inlineCallbacks
+    def deserialize(store, input):
+        """Converts a dict that was produced by `serialize` back into a
+        EventContext.
+
+        Args:
+            store (DataStore): Used to convert AS ID to AS object
+            input (dict): A dict produced by `serialize`
+
+        Returns:
+            EventContext
+        """
+        context = EventContext()
+        context.state_group = input["state_group"]
+        context.rejected = input["rejected"]
+        context.prev_group = input["prev_group"]
+        context.delta_ids = _decode_state_dict(input["delta_ids"])
+        context.prev_state_events = input["prev_state_events"]
+
+        # We use the state_group and prev_state_id stuff to pull the
+        # current_state_ids out of the DB and construct prev_state_ids.
+        prev_state_id = input["prev_state_id"]
+        event_type = input["event_type"]
+        event_state_key = input["event_state_key"]
+
+        context.current_state_ids = yield store.get_state_ids_for_group(
+            context.state_group,
+        )
+        if prev_state_id and event_state_key:
+            context.prev_state_ids = dict(context.current_state_ids)
+            context.prev_state_ids[(event_type, event_state_key)] = prev_state_id
+        else:
+            context.prev_state_ids = context.current_state_ids
+
+        app_service_id = input["app_service_id"]
+        if app_service_id:
+            context.app_service = store.get_app_service_by_id(app_service_id)
+
+        defer.returnValue(context)
+
+
+def _encode_state_dict(state_dict):
+    """Since dicts of (type, state_key) -> event_id cannot be serialized in
+    JSON we need to convert them to a form that can.
+    """
+    if state_dict is None:
+        return None
+
+    return [
+        (etype, state_key, v)
+        for (etype, state_key), v in state_dict.iteritems()
+    ]
+
+
+def _decode_state_dict(input):
+    """Decodes a state dict encoded using `_encode_state_dict` above
+    """
+    if input is None:
+        return None
+
+    return frozendict({(etype, state_key,): v for etype, state_key, v in input})
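
Editor's note: the encode/decode pair at the bottom exists because JSON objects cannot have tuple keys, so each (type, state_key) -> event_id entry is flattened to a triple and rebuilt on the way back in. A small round-trip check of that scheme, written with Python 3's .items() where the diff itself uses the Python 2 .iteritems():

    from frozendict import frozendict


    def _encode_state_dict(state_dict):
        # Flatten tuple-keyed entries into JSON-friendly triples.
        if state_dict is None:
            return None
        return [
            (etype, state_key, v)
            for (etype, state_key), v in state_dict.items()
        ]


    def _decode_state_dict(encoded):
        # Rebuild the tuple-keyed map; frozendict keeps it immutable.
        if encoded is None:
            return None
        return frozendict({(etype, state_key): v for etype, state_key, v in encoded})


    delta = {("m.room.member", "@alice:example.com"): "$event_a"}
    assert dict(_decode_state_dict(_encode_state_dict(delta))) == delta
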
synapse/events/spamcheck.py (new file, 113 lines)
@@ -0,0 +1,113 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class SpamChecker(object):
    def __init__(self, hs):
        self.spam_checker = None

        module = None
        config = None
        try:
            module, config = hs.config.spam_checker
        except Exception:
            pass

        if module is not None:
            self.spam_checker = module(config=config)

    def check_event_for_spam(self, event):
        """Checks if a given event is considered "spammy" by this server.

        If the server considers an event spammy, then it will be rejected if
        sent by a local user. If it is sent by a user on another server, then
        users receive a blank event.

        Args:
            event (synapse.events.EventBase): the event to be checked

        Returns:
            bool: True if the event is spammy.
        """
        if self.spam_checker is None:
            return False

        return self.spam_checker.check_event_for_spam(event)

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        """Checks if a given user may send an invite

        If this method returns false, the invite will be rejected.

        Args:
            inviter_userid (string): The user ID of the sender of the invite
            invitee_userid (string): The user ID of the invited user
            room_id (string): The ID of the room the invite is for

        Returns:
            bool: True if the user may send an invite, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_invite(inviter_userid, invitee_userid, room_id)

    def user_may_create_room(self, userid):
        """Checks if a given user may create a room

        If this method returns false, the creation request will be rejected.

        Args:
            userid (string): The sender's user ID

        Returns:
            bool: True if the user may create a room, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_create_room(userid)

    def user_may_create_room_alias(self, userid, room_alias):
        """Checks if a given user may create a room alias

        If this method returns false, the association request will be rejected.

        Args:
            userid (string): The sender's user ID
            room_alias (string): The alias to be created

        Returns:
            bool: True if the user may create a room alias, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_create_room_alias(userid, room_alias)

    def user_may_publish_room(self, userid, room_id):
        """Checks if a given user may publish a room to the directory

        If this method returns false, the publish request will be rejected.

        Args:
            userid (string): The sender's user ID
            room_id (string): The ID of the room that would be published

        Returns:
            bool: True if the user may publish the room, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_publish_room(userid, room_id)
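The SpamChecker class above only delegates: the actual policy lives in a module configured via `hs.config.spam_checker`, which is expected to expose the same five methods. A minimal illustrative module (the class name and the `blocked_words` config key are invented for this sketch, not part of Synapse):

    class ExampleSpamChecker(object):
        def __init__(self, config):
            # Hypothetical config key; whatever the deployment puts in the
            # spam_checker config block arrives here as `config`.
            self.blocked_words = config.get("blocked_words", [])

        def check_event_for_spam(self, event):
            # Flag message events whose body contains a blocked word.
            body = event.content.get("body", "")
            return any(word in body for word in self.blocked_words)

        def user_may_invite(self, inviter_userid, invitee_userid, room_id):
            return True

        def user_may_create_room(self, userid):
            return True

        def user_may_create_room_alias(self, userid, room_alias):
            return True

        def user_may_publish_room(self, userid, room_id):
            return True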
synapse/events/utils.py
@@ -16,6 +16,17 @@
 from synapse.api.constants import EventTypes
 from . import EventBase
+
+from frozendict import frozendict
+
+import re
+
+# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
+# (?<!stuff) matches if the current position in the string is not preceded
+# by a match for 'stuff'.
+# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as
+# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
+SPLIT_FIELD_REGEX = re.compile(r'(?<!\\)\.')
 
 
 def prune_event(event):
     """ Returns a pruned version of the given event, which removes all keys we
@@ -97,6 +108,83 @@ def prune_event(event):
     )
 
+
+def _copy_field(src, dst, field):
+    """Copy the field in 'src' to 'dst'.
+
+    For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
+    then dst={"foo":{"bar":5}}.
+
+    Args:
+        src(dict): The dict to read from.
+        dst(dict): The dict to modify.
+        field(list<str>): List of keys to drill down to in 'src'.
+    """
+    if len(field) == 0:  # this should be impossible
+        return
+    if len(field) == 1:  # common case e.g. 'origin_server_ts'
+        if field[0] in src:
+            dst[field[0]] = src[field[0]]
+        return
+
+    # Else is a nested field e.g. 'content.body'
+    # Pop the last field as that's the key to move across and we need the
+    # parent dict in order to access the data. Drill down to the right dict.
+    key_to_move = field.pop(-1)
+    sub_dict = src
+    for sub_field in field:  # e.g. sub_field => "content"
+        if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
+            sub_dict = sub_dict[sub_field]
+        else:
+            return
+
+    if key_to_move not in sub_dict:
+        return
+
+    # Insert the key into the output dictionary, creating nested objects
+    # as required. We couldn't do this any earlier or else we'd need to delete
+    # the empty objects if the key didn't exist.
+    sub_out_dict = dst
+    for sub_field in field:
+        sub_out_dict = sub_out_dict.setdefault(sub_field, {})
+    sub_out_dict[key_to_move] = sub_dict[key_to_move]
+
+
+def only_fields(dictionary, fields):
+    """Return a new dict with only the fields in 'dictionary' which are present
+    in 'fields'.
+
+    If there are no event fields specified then all fields are included.
+    The entries may include '.' characters to indicate sub-fields.
+    So ['content.body'] will include the 'body' field of the 'content' object.
+    A literal '.' character in a field name may be escaped using a '\'.
+
+    Args:
+        dictionary(dict): The dictionary to read from.
+        fields(list<str>): A list of fields to copy over. Only shallow refs are
+            taken.
+    Returns:
+        dict: A new dictionary with only the given fields. If fields was empty,
+        the same dictionary is returned.
+    """
+    if len(fields) == 0:
+        return dictionary
+
+    # for each field, convert it:
+    # ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
+    split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]
+
+    # for each element of the output array of arrays:
+    # remove escaping so we can use the right key names.
+    split_fields[:] = [
+        [f.replace(r'\.', r'.') for f in field_array] for field_array in split_fields
+    ]
+
+    output = {}
+    for field_array in split_fields:
+        _copy_field(dictionary, output, field_array)
+    return output
+
+
 def format_event_raw(d):
     return d
 
@@ -137,7 +225,22 @@ def format_event_for_client_v2_without_room_id(d):
 
 def serialize_event(e, time_now_ms, as_client_event=True,
                     event_format=format_event_for_client_v1,
-                    token_id=None):
+                    token_id=None, only_event_fields=None, is_invite=False):
+    """Serialize event for clients
+
+    Args:
+        e (EventBase)
+        time_now_ms (int)
+        as_client_event (bool)
+        event_format
+        token_id
+        only_event_fields
+        is_invite (bool): Whether this is an invite that is being sent to the
+            invitee
+
+    Returns:
+        dict
+    """
     # FIXME(erikj): To handle the case of presence events and the like
     if not isinstance(e, EventBase):
         return e
@@ -163,7 +266,19 @@ def serialize_event(e, time_now_ms, as_client_event=True,
     if txn_id is not None:
         d["unsigned"]["transaction_id"] = txn_id
 
+    # If this is an invite for somebody else, then we don't care about the
+    # invite_room_state as that's meant solely for the invitee. Other clients
+    # will already have the state since they're in the room.
+    if not is_invite:
+        d["unsigned"].pop("invite_room_state", None)
+
     if as_client_event:
-        return event_format(d)
-    else:
-        return d
+        d = event_format(d)
+
+    if only_event_fields:
+        if (not isinstance(only_event_fields, list) or
+                not all(isinstance(f, basestring) for f in only_event_fields)):
+            raise TypeError("only_event_fields must be a list of strings")
+        d = only_fields(d, only_event_fields)
+
+    return d
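For illustration, `only_fields` (defined above) applied to a hand-written event dict (the values are invented):

    event_dict = {
        "type": "m.room.message",
        "content": {"body": "hello", "msgtype": "m.text"},
        "origin_server_ts": 1500000000000,
    }

    only_fields(event_dict, ["content.body", "origin_server_ts"])
    # => {"content": {"body": "hello"}, "origin_server_ts": 1500000000000}

    # A literal dot in a key name is escaped with a backslash:
    only_fields({"foo.bar": 1}, [r"foo\.bar"])
    # => {"foo.bar": 1}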
synapse/federation/__init__.py
@@ -15,12 +15,3 @@
 
 """ This package includes all the federation specific logic.
 """
-
-from .replication import ReplicationLayer
-from .transport.client import TransportLayerClient
-
-
-def initialize_http_replication(homeserver):
-    transport = TransportLayerClient(homeserver)
-
-    return ReplicationLayer(homeserver, transport)
synapse/federation/federation_base.py
@@ -12,27 +12,28 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-
-from twisted.internet import defer
-
-from synapse.events.utils import prune_event
-
-from synapse.crypto.event_signing import check_event_content_hash
-
-from synapse.api.errors import SynapseError
-
-from synapse.util import unwrapFirstError
-
 import logging
+
+from synapse.api.errors import SynapseError
+from synapse.crypto.event_signing import check_event_content_hash
+from synapse.events import FrozenEvent
+from synapse.events.utils import prune_event
+from synapse.http.servlet import assert_params_in_request
+from synapse.util import unwrapFirstError, logcontext
+
+from twisted.internet import defer
 
 logger = logging.getLogger(__name__)
 
 
 class FederationBase(object):
     def __init__(self, hs):
-        pass
+        self.hs = hs
+
+        self.server_name = hs.hostname
+        self.keyring = hs.get_keyring()
+        self.spam_checker = hs.get_spam_checker()
+        self.store = hs.get_datastore()
+        self._clock = hs.get_clock()
 
     @defer.inlineCallbacks
     def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
@@ -56,55 +57,51 @@ class FederationBase(object):
         """
         deferreds = self._check_sigs_and_hashes(pdus)
 
-        def callback(pdu):
-            return pdu
+        @defer.inlineCallbacks
+        def handle_check_result(pdu, deferred):
+            try:
+                res = yield logcontext.make_deferred_yieldable(deferred)
+            except SynapseError:
+                res = None
 
-        def errback(failure, pdu):
-            failure.trap(SynapseError)
-            return None
-
-        def try_local_db(res, pdu):
             if not res:
                 # Check local db.
-                return self.store.get_event(
+                res = yield self.store.get_event(
                     pdu.event_id,
                     allow_rejected=True,
                     allow_none=True,
                 )
-            return res
 
-        def try_remote(res, pdu):
             if not res and pdu.origin != origin:
-                return self.get_pdu(
-                    destinations=[pdu.origin],
-                    event_id=pdu.event_id,
-                    outlier=outlier,
-                    timeout=10000,
-                ).addErrback(lambda e: None)
-            return res
+                try:
+                    res = yield self.get_pdu(
+                        destinations=[pdu.origin],
+                        event_id=pdu.event_id,
+                        outlier=outlier,
+                        timeout=10000,
+                    )
+                except SynapseError:
+                    pass
 
-        def warn(res, pdu):
             if not res:
                 logger.warn(
                     "Failed to find copy of %s with valid signature",
                     pdu.event_id,
                 )
-            return res
 
-        for pdu, deferred in zip(pdus, deferreds):
-            deferred.addCallbacks(
-                callback, errback, errbackArgs=[pdu]
-            ).addCallback(
-                try_local_db, pdu
-            ).addCallback(
-                try_remote, pdu
-            ).addCallback(
-                warn, pdu
-            )
+            defer.returnValue(res)
 
-        valid_pdus = yield defer.gatherResults(
-            deferreds,
-            consumeErrors=True
+        handle = logcontext.preserve_fn(handle_check_result)
+        deferreds2 = [
+            handle(pdu, deferred)
+            for pdu, deferred in zip(pdus, deferreds)
+        ]
+
+        valid_pdus = yield logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                deferreds2,
+                consumeErrors=True,
+            )
         ).addErrback(unwrapFirstError)
 
         if include_none:
@@ -113,15 +110,24 @@ class FederationBase(object):
             defer.returnValue([p for p in valid_pdus if p])
 
     def _check_sigs_and_hash(self, pdu):
-        return self._check_sigs_and_hashes([pdu])[0]
+        return logcontext.make_deferred_yieldable(
+            self._check_sigs_and_hashes([pdu])[0],
+        )
 
     def _check_sigs_and_hashes(self, pdus):
-        """Throws a SynapseError if a PDU does not have the correct
-        signatures.
+        """Checks that each of the received events is correctly signed by the
+        sending server.
+
+        Args:
+            pdus (list[FrozenEvent]): the events to be checked
 
         Returns:
-            FrozenEvent: Either the given event or it redacted if it failed the
-            content hash check.
+            list[Deferred]: for each input event, a deferred which:
+              * returns the original event if the checks pass
+              * returns a redacted version of the event (if the signature
+                matched but the hash did not)
+              * throws a SynapseError if the signature check failed.
+            The deferreds run their callbacks in the sentinel logcontext.
         """
 
         redacted_pdus = [
@@ -134,21 +140,33 @@ class FederationBase(object):
             for p in redacted_pdus
         ])
 
+        ctx = logcontext.LoggingContext.current_context()
+
         def callback(_, pdu, redacted):
-            if not check_event_content_hash(pdu):
-                logger.warn(
-                    "Event content has been tampered, redacting %s: %s",
-                    pdu.event_id, pdu.get_pdu_json()
-                )
-                return redacted
-            return pdu
+            with logcontext.PreserveLoggingContext(ctx):
+                if not check_event_content_hash(pdu):
+                    logger.warn(
+                        "Event content has been tampered, redacting %s: %s",
+                        pdu.event_id, pdu.get_pdu_json()
+                    )
+                    return redacted
+
+                if self.spam_checker.check_event_for_spam(pdu):
+                    logger.warn(
+                        "Event contains spam, redacting %s: %s",
+                        pdu.event_id, pdu.get_pdu_json()
+                    )
+                    return redacted
+
+                return pdu
 
         def errback(failure, pdu):
             failure.trap(SynapseError)
-            logger.warn(
-                "Signature check failed for %s",
-                pdu.event_id,
-            )
+            with logcontext.PreserveLoggingContext(ctx):
+                logger.warn(
+                    "Signature check failed for %s",
+                    pdu.event_id,
+                )
             return failure
 
         for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
@@ -159,3 +177,28 @@ class FederationBase(object):
             )
 
         return deferreds
+
+
+def event_from_pdu_json(pdu_json, outlier=False):
+    """Construct a FrozenEvent from an event json received over federation
+
+    Args:
+        pdu_json (object): pdu as received over federation
+        outlier (bool): True to mark this event as an outlier
+
+    Returns:
+        FrozenEvent
+
+    Raises:
+        SynapseError: if the pdu is missing required fields
+    """
+    # we could probably enforce a bunch of other fields here (room_id, sender,
+    # origin, etc etc)
+    assert_params_in_request(pdu_json, ('event_id', 'type'))
+    event = FrozenEvent(
+        pdu_json
+    )
+
+    event.internal_metadata.outlier = outlier
+
+    return event
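The new module-level `event_from_pdu_json` replaces the old method of the same name on `FederationClient` (removed further down). A sketch of the intended calling pattern, combining it with `_check_sigs_and_hash` (the wrapper function name here is invented):

    @defer.inlineCallbacks
    def handle_incoming_pdu(federation_base, pdu_json):
        # Raises SynapseError if 'event_id' or 'type' is missing.
        pdu = event_from_pdu_json(pdu_json)

        # Yields the event itself, a redacted copy if the content hash was
        # wrong, or fails with SynapseError if the signature was invalid.
        checked = yield federation_base._check_sigs_and_hash(pdu)
        defer.returnValue(checked)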
synapse/federation/federation_client.py
@@ -14,29 +14,28 @@
 # limitations under the License.
-
-
-from twisted.internet import defer
-
-from .federation_base import FederationBase
-from synapse.api.constants import Membership
-from .units import Edu
-
-from synapse.api.errors import (
-    CodeMessageException, HttpResponseException, SynapseError,
-)
-from synapse.util import unwrapFirstError
-from synapse.util.async import concurrently_execute
-from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.logutils import log_function
-from synapse.events import FrozenEvent
-import synapse.metrics
-
-from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
-
 import copy
 import itertools
 import logging
 import random
+
+from twisted.internet import defer
+
+from synapse.api.constants import Membership
+from synapse.api.errors import (
+    CodeMessageException, HttpResponseException, SynapseError, FederationDeniedError
+)
+from synapse.events import builder
+from synapse.federation.federation_base import (
+    FederationBase,
+    event_from_pdu_json,
+)
+import synapse.metrics
+from synapse.util import logcontext, unwrapFirstError
+from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
+from synapse.util.logutils import log_function
+from synapse.util.retryutils import NotRetryingDestination
 
 logger = logging.getLogger(__name__)
 
@@ -44,10 +43,6 @@ logger = logging.getLogger(__name__)
 # synapse.federation.federation_client is a silly name
 metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
 
-sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")
-
-sent_edus_counter = metrics.register_counter("sent_edus")
-
 sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
 
@@ -62,6 +57,8 @@ class FederationClient(FederationBase):
         self._clock.looping_call(
             self._clear_tried_cache, 60 * 1000,
         )
+        self.state = hs.get_state_handler()
+        self.transport_layer = hs.get_federation_transport_client()
 
     def _clear_tried_cache(self):
        """Clear pdu_destination_tried cache"""
@@ -90,58 +87,9 @@ class FederationClient(FederationBase):
 
         self._get_pdu_cache.start()
 
-    @log_function
-    def send_pdu(self, pdu, destinations):
-        """Informs the replication layer about a new PDU generated within the
-        home server that should be transmitted to others.
-
-        TODO: Figure out when we should actually resolve the deferred.
-
-        Args:
-            pdu (Pdu): The new Pdu.
-
-        Returns:
-            Deferred: Completes when we have successfully processed the PDU
-            and replicated it to any interested remote home servers.
-        """
-        order = self._order
-        self._order += 1
-
-        sent_pdus_destination_dist.inc_by(len(destinations))
-
-        logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)
-
-        # TODO, add errback, etc.
-        self._transaction_queue.enqueue_pdu(pdu, destinations, order)
-
-        logger.debug(
-            "[%s] transaction_layer.enqueue_pdu... done",
-            pdu.event_id
-        )
-
-    @log_function
-    def send_edu(self, destination, edu_type, content):
-        edu = Edu(
-            origin=self.server_name,
-            destination=destination,
-            edu_type=edu_type,
-            content=content,
-        )
-
-        sent_edus_counter.inc()
-
-        # TODO, add errback, etc.
-        self._transaction_queue.enqueue_edu(edu)
-        return defer.succeed(None)
-
-    @log_function
-    def send_failure(self, failure, destination):
-        self._transaction_queue.enqueue_failure(failure, destination)
-        return defer.succeed(None)
-
     @log_function
     def make_query(self, destination, query_type, args,
-                   retry_on_dns_fail=False):
+                   retry_on_dns_fail=False, ignore_backoff=False):
         """Sends a federation Query to a remote homeserver of the given type
         and arguments.
@@ -151,6 +99,8 @@ class FederationClient(FederationBase):
             handler name used in register_query_handler().
             args (dict): Mapping of strings to strings containing the details
                 of the query request.
+            ignore_backoff (bool): true to ignore the historical backoff data
+                and try the request anyway.
 
         Returns:
             a Deferred which will eventually yield a JSON object from the
@@ -159,11 +109,12 @@ class FederationClient(FederationBase):
         sent_queries_counter.inc(query_type)
 
         return self.transport_layer.make_query(
-            destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
+            destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail,
+            ignore_backoff=ignore_backoff,
         )
 
     @log_function
-    def query_client_keys(self, destination, content):
+    def query_client_keys(self, destination, content, timeout):
         """Query device keys for a device hosted on a remote server.
 
         Args:
@@ -175,10 +126,22 @@ class FederationClient(FederationBase):
             response
         """
         sent_queries_counter.inc("client_device_keys")
-        return self.transport_layer.query_client_keys(destination, content)
+        return self.transport_layer.query_client_keys(
+            destination, content, timeout
+        )
 
     @log_function
-    def claim_client_keys(self, destination, content):
+    def query_user_devices(self, destination, user_id, timeout=30000):
+        """Query the device keys for a list of user ids hosted on a remote
+        server.
+        """
+        sent_queries_counter.inc("user_devices")
+        return self.transport_layer.query_user_devices(
+            destination, user_id, timeout
+        )
+
+    @log_function
+    def claim_client_keys(self, destination, content, timeout):
         """Claims one-time keys for a device hosted on a remote server.
 
         Args:
@@ -190,7 +153,9 @@ class FederationClient(FederationBase):
             response
         """
         sent_queries_counter.inc("client_one_time_keys")
-        return self.transport_layer.claim_client_keys(destination, content)
+        return self.transport_layer.claim_client_keys(
+            destination, content, timeout
+        )
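All three key-management queries now thread an explicit timeout down to the transport layer. A hedged usage sketch (the destination and user ID are invented):

    @defer.inlineCallbacks
    def fetch_remote_devices(federation_client):
        # Give up if the remote server has not answered within 30 seconds.
        result = yield federation_client.query_user_devices(
            "remote.example.com", "@bob:remote.example.com", timeout=30000,
        )
        defer.returnValue(result)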
@@ -220,15 +185,15 @@ class FederationClient(FederationBase):
         logger.debug("backfill transaction_data=%s", repr(transaction_data))
 
         pdus = [
-            self.event_from_pdu_json(p, outlier=False)
+            event_from_pdu_json(p, outlier=False)
             for p in transaction_data["pdus"]
         ]
 
         # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = yield defer.gatherResults(
+        pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             self._check_sigs_and_hashes(pdus),
             consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))
 
         defer.returnValue(pdus)
 
@@ -245,8 +210,7 @@ class FederationClient(FederationBase):
 
         Args:
             destinations (list): Which home servers to query
-            pdu_origin (str): The home server that originally sent the pdu.
-            event_id (str)
+            event_id (str): event to fetch
             outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
                 it's from an arbitrary point in the context as opposed to part
                 of the current block of PDUs. Defaults to `False`
@@ -266,7 +230,7 @@ class FederationClient(FederationBase):
 
         pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
 
-        pdu = None
+        signed_pdu = None
         for destination in destinations:
             now = self._clock.time_msec()
             last_attempt = pdu_attempts.get(destination, 0)
@@ -274,31 +238,24 @@ class FederationClient(FederationBase):
                 continue
 
             try:
-                limiter = yield get_retry_limiter(
-                    destination,
-                    self._clock,
-                    self.store,
+                transaction_data = yield self.transport_layer.get_event(
+                    destination, event_id, timeout=timeout,
                 )
 
-                with limiter:
-                    transaction_data = yield self.transport_layer.get_event(
-                        destination, event_id, timeout=timeout,
-                    )
+                logger.debug("transaction_data %r", transaction_data)
 
-                    logger.debug("transaction_data %r", transaction_data)
+                pdu_list = [
+                    event_from_pdu_json(p, outlier=outlier)
+                    for p in transaction_data["pdus"]
+                ]
 
-                    pdu_list = [
-                        self.event_from_pdu_json(p, outlier=outlier)
-                        for p in transaction_data["pdus"]
-                    ]
+                if pdu_list and pdu_list[0]:
+                    pdu = pdu_list[0]
 
-                    if pdu_list and pdu_list[0]:
-                        pdu = pdu_list[0]
+                    # Check signatures are correct.
+                    signed_pdu = yield self._check_sigs_and_hash(pdu)
 
-                        # Check signatures are correct.
-                        pdu = yield self._check_sigs_and_hashes([pdu])[0]
-
-                        break
+                    break
 
                 pdu_attempts[destination] = now
 
@@ -310,6 +267,9 @@ class FederationClient(FederationBase):
             except NotRetryingDestination as e:
                 logger.info(e.message)
                 continue
+            except FederationDeniedError as e:
+                logger.info(e.message)
+                continue
             except Exception as e:
                 pdu_attempts[destination] = now
 
@@ -319,10 +279,10 @@ class FederationClient(FederationBase):
                 )
                 continue
 
-        if self._get_pdu_cache is not None and pdu:
-            self._get_pdu_cache[event_id] = pdu
+        if self._get_pdu_cache is not None and signed_pdu:
+            self._get_pdu_cache[event_id] = signed_pdu
 
-        defer.returnValue(pdu)
+        defer.returnValue(signed_pdu)
 
     @defer.inlineCallbacks
     @log_function
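After this change, `get_pdu` walks the candidate destinations in order, skips servers that are backing off or denied, and resolves to the signature-checked event, or None if no server could supply a valid copy. A sketch of a caller (server names invented):

    @defer.inlineCallbacks
    def fetch_event(federation_client, event_id):
        signed_pdu = yield federation_client.get_pdu(
            destinations=["remote1.example.com", "remote2.example.com"],
            event_id=event_id,
            timeout=10000,
        )
        defer.returnValue(signed_pdu)  # None if every destination failed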
@@ -380,11 +340,11 @@ class FederationClient(FederationBase):
         )
 
         pdus = [
-            self.event_from_pdu_json(p, outlier=True) for p in result["pdus"]
+            event_from_pdu_json(p, outlier=True) for p in result["pdus"]
         ]
 
         auth_chain = [
-            self.event_from_pdu_json(p, outlier=True)
+            event_from_pdu_json(p, outlier=True)
             for p in result.get("auth_chain", [])
         ]
 
@@ -457,16 +417,18 @@ class FederationClient(FederationBase):
             batch = set(missing_events[i:i + batch_size])
 
             deferreds = [
-                self.get_pdu(
+                preserve_fn(self.get_pdu)(
                     destinations=random_server_list(),
                     event_id=e_id,
                 )
                 for e_id in batch
             ]
 
-            res = yield defer.DeferredList(deferreds, consumeErrors=True)
+            res = yield make_deferred_yieldable(
+                defer.DeferredList(deferreds, consumeErrors=True)
+            )
             for success, result in res:
-                if success:
+                if success and result:
                     signed_events.append(result)
                     batch.discard(result.event_id)
 
@@ -483,7 +445,7 @@ class FederationClient(FederationBase):
         )
 
         auth_chain = [
-            self.event_from_pdu_json(p, outlier=True)
+            event_from_pdu_json(p, outlier=True)
            for p in res["auth_chain"]
         ]
@@ -516,8 +478,13 @@ class FederationClient(FederationBase):
             content (object): Any additional data to put into the content field
                 of the event.
         Return:
-            A tuple of (origin (str), event (object)) where origin is the remote
-            homeserver which generated the event.
+            Deferred: resolves to a tuple of (origin (str), event (object))
+            where origin is the remote homeserver which generated the event.
+
+            Fails with a ``CodeMessageException`` if the chosen remote server
+            returns a 300/400 code.
+
+            Fails with a ``RuntimeError`` if no servers were reachable.
         """
         valid_memberships = {Membership.JOIN, Membership.LEAVE}
         if membership not in valid_memberships:
@@ -546,8 +513,10 @@ class FederationClient(FederationBase):
                 if "prev_state" not in pdu_dict:
                     pdu_dict["prev_state"] = []
 
+                ev = builder.EventBuilder(pdu_dict)
+
                 defer.returnValue(
-                    (destination, self.event_from_pdu_json(pdu_dict))
+                    (destination, ev)
                 )
                 break
             except CodeMessageException as e:
@@ -568,6 +537,27 @@ class FederationClient(FederationBase):
 
     @defer.inlineCallbacks
     def send_join(self, destinations, pdu):
+        """Sends a join event to one of a list of homeservers.
+
+        Doing so will cause the remote server to add the event to the graph,
+        and send the event out to the rest of the federation.
+
+        Args:
+            destinations (str): Candidate homeservers which are probably
+                participating in the room.
+            pdu (BaseEvent): event to be sent
+
+        Return:
+            Deferred: resolves to a dict with members ``origin`` (a string
+            giving the server the event was sent to), ``state`` (?) and
+            ``auth_chain``.
+
+            Fails with a ``CodeMessageException`` if the chosen remote server
+            returns a 300/400 code.
+
+            Fails with a ``RuntimeError`` if no servers were reachable.
+        """
+
         for destination in destinations:
             if destination == self.server_name:
                 continue
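Per the docstring just added, a caller would consume `send_join`'s result roughly like this (the wrapper name is invented):

    @defer.inlineCallbacks
    def join_room(federation_client, destinations, join_event):
        ret = yield federation_client.send_join(destinations, join_event)
        origin = ret["origin"]          # server the event was sent to
        state = ret["state"]            # room state at the join
        auth_chain = ret["auth_chain"]  # events authorising that state
        defer.returnValue((origin, state, auth_chain))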
@@ -584,12 +574,12 @@ class FederationClient(FederationBase):
             logger.debug("Got content: %s", content)
 
             state = [
-                self.event_from_pdu_json(p, outlier=True)
+                event_from_pdu_json(p, outlier=True)
                 for p in content.get("state", [])
             ]
 
             auth_chain = [
-                self.event_from_pdu_json(p, outlier=True)
+                event_from_pdu_json(p, outlier=True)
                 for p in content.get("auth_chain", [])
             ]
 
@@ -664,7 +654,7 @@ class FederationClient(FederationBase):
 
         logger.debug("Got response to send_invite: %s", pdu_dict)
 
-        pdu = self.event_from_pdu_json(pdu_dict)
+        pdu = event_from_pdu_json(pdu_dict)
 
         # Check signatures are correct.
         pdu = yield self._check_sigs_and_hash(pdu)
@@ -675,6 +665,26 @@ class FederationClient(FederationBase):
 
     @defer.inlineCallbacks
     def send_leave(self, destinations, pdu):
+        """Sends a leave event to one of a list of homeservers.
+
+        Doing so will cause the remote server to add the event to the graph,
+        and send the event out to the rest of the federation.
+
+        This is mostly useful to reject received invites.
+
+        Args:
+            destinations (str): Candidate homeservers which are probably
+                participating in the room.
+            pdu (BaseEvent): event to be sent
+
+        Return:
+            Deferred: resolves to None.
+
+            Fails with a ``CodeMessageException`` if the chosen remote server
+            returns a non-200 code.
+
+            Fails with a ``RuntimeError`` if no servers were reachable.
+        """
         for destination in destinations:
             if destination == self.server_name:
                 continue
@@ -700,24 +710,17 @@ class FederationClient(FederationBase):
 
         raise RuntimeError("Failed to send to any server.")
 
-    @defer.inlineCallbacks
-    def get_public_rooms(self, destinations):
-        results_by_server = {}
-
-        @defer.inlineCallbacks
-        def _get_result(s):
-            if s == self.server_name:
-                defer.returnValue()
-
-            try:
-                result = yield self.transport_layer.get_public_rooms(s)
-                results_by_server[s] = result
-            except:
-                logger.exception("Error getting room list from server %r", s)
-
-        yield concurrently_execute(_get_result, destinations, 3)
-
-        defer.returnValue(results_by_server)
+    def get_public_rooms(self, destination, limit=None, since_token=None,
+                         search_filter=None, include_all_networks=False,
+                         third_party_instance_id=None):
+        if destination == self.server_name:
+            return
+
+        return self.transport_layer.get_public_rooms(
+            destination, limit, since_token, search_filter,
+            include_all_networks=include_all_networks,
+            third_party_instance_id=third_party_instance_id,
+        )
 
     @defer.inlineCallbacks
     def query_auth(self, destination, room_id, event_id, local_auth):
@@ -741,7 +744,7 @@ class FederationClient(FederationBase):
         )
 
         auth_chain = [
-            self.event_from_pdu_json(e)
+            event_from_pdu_json(e)
             for e in content["auth_chain"]
         ]
 
@@ -761,7 +764,7 @@ class FederationClient(FederationBase):
 
     @defer.inlineCallbacks
     def get_missing_events(self, destination, room_id, earliest_events_ids,
-                           latest_events, limit, min_depth):
+                           latest_events, limit, min_depth, timeout):
         """Tries to fetch events we are missing. This is called when we receive
         an event without having received all of its ancestors.
 
@@ -775,6 +778,7 @@ class FederationClient(FederationBase):
                 have all previous events for.
             limit (int): Maximum number of events to return.
             min_depth (int): Minimum depth of events to return.
+            timeout (int): Max time to wait in ms
         """
         try:
             content = yield self.transport_layer.get_missing_events(
@@ -784,18 +788,17 @@ class FederationClient(FederationBase):
                 latest_events=[e.event_id for e in latest_events],
                 limit=limit,
                 min_depth=min_depth,
+                timeout=timeout,
             )
 
             events = [
-                self.event_from_pdu_json(e)
+                event_from_pdu_json(e)
                 for e in content.get("events", [])
             ]
 
             signed_events = yield self._check_sigs_and_hash_and_fetch(
                 destination, events, outlier=False
             )
-
-            have_gotten_all_from_destination = True
         except HttpResponseException as e:
             if not e.code == 400:
                 raise
@@ -803,81 +806,9 @@ class FederationClient(FederationBase):
             # We are probably hitting an old server that doesn't support
             # get_missing_events
             signed_events = []
-            have_gotten_all_from_destination = False
-
-        if len(signed_events) >= limit:
-            defer.returnValue(signed_events)
-
-        servers = yield self.store.get_joined_hosts_for_room(room_id)
-
-        servers = set(servers)
-        servers.discard(self.server_name)
-
-        failed_to_fetch = set()
-
-        while len(signed_events) < limit:
-            # Are we missing any?
-
-            seen_events = set(earliest_events_ids)
-            seen_events.update(e.event_id for e in signed_events if e)
-
-            missing_events = {}
-            for e in itertools.chain(latest_events, signed_events):
-                if e.depth > min_depth:
-                    missing_events.update({
-                        e_id: e.depth for e_id, _ in e.prev_events
-                        if e_id not in seen_events
-                        and e_id not in failed_to_fetch
-                    })
-
-            if not missing_events:
-                break
-
-            have_seen = yield self.store.have_events(missing_events)
-
-            for k in have_seen:
-                missing_events.pop(k, None)
-
-            if not missing_events:
-                break
-
-            # Okay, we haven't gotten everything yet. Lets get them.
-            ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])
-
-            if have_gotten_all_from_destination:
-                servers.discard(destination)
-
-            def random_server_list():
-                srvs = list(servers)
-                random.shuffle(srvs)
-                return srvs
-
-            deferreds = [
-                self.get_pdu(
-                    destinations=random_server_list(),
-                    event_id=e_id,
-                )
-                for e_id, depth in ordered_missing[:limit - len(signed_events)]
-            ]
-
-            res = yield defer.DeferredList(deferreds, consumeErrors=True)
-            for (result, val), (e_id, _) in zip(res, ordered_missing):
-                if result and val:
-                    signed_events.append(val)
-                else:
-                    failed_to_fetch.add(e_id)
 
         defer.returnValue(signed_events)
 
-    def event_from_pdu_json(self, pdu_json, outlier=False):
-        event = FrozenEvent(
-            pdu_json
-        )
-
-        event.internal_metadata.outlier = outlier
-
-        return event
-
     @defer.inlineCallbacks
     def forward_third_party_invite(self, destinations, room_id, event_dict):
         for destination in destinations:
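The reworked `get_public_rooms` (earlier in this file's diff) queries a single destination with paging and filtering options instead of fanning out to a list of servers. An illustrative call (the destination and filter values are invented; `generic_search_term` is the room-directory filter key from the Matrix client API):

    @defer.inlineCallbacks
    def search_remote_directory(federation_client, destination):
        # First page of up to 20 public rooms matching "matrix".
        response = yield federation_client.get_public_rooms(
            destination,
            limit=20,
            search_filter={"generic_search_term": "matrix"},
        )
        defer.returnValue(response)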