Mirror of https://github.com/element-hq/synapse.git

Compare commits: v0.23.0...erikj/stat (1156 commits)
.gitignore: 2 additions

@@ -46,3 +46,5 @@ static/client/register/register_config.js
env/
*.config
.vscode/
CHANGES.rst: 255 additions

@@ -1,3 +1,258 @@
Changes in synapse v0.27.2 (2018-03-26)
=======================================

Bug fixes:

* Fix bug which broke TCP replication between workers (PR #3015)


Changes in synapse v0.27.1 (2018-03-26)
=======================================

Meta release as v0.27.0 temporarily pointed to the wrong commit


Changes in synapse v0.27.0 (2018-03-26)
=======================================

No changes since v0.27.0-rc2


Changes in synapse v0.27.0-rc2 (2018-03-19)
===========================================

Pulls in v0.26.1

Bug fixes:

* Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)


Changes in synapse v0.26.1 (2018-03-15)
=======================================

Bug fixes:

* Fix bug where an invalid event caused server to stop functioning correctly,
  due to parsing and serializing bugs in ujson library (PR #3008)


Changes in synapse v0.27.0-rc1 (2018-03-14)
===========================================

The common case for running Synapse is not to run separate workers, but for those that do, be aware that synctl no longer starts the main synapse when using ``-a`` option with workers. A new worker file should be added with ``worker_app: synapse.app.homeserver``.

This release also begins the process of renaming a number of the metrics
reported to prometheus. See `docs/metrics-howto.rst <docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0>`_.
Note that the v0.28.0 release will remove the deprecated metric names.

Features:

* Add ability for ASes to override message send time (PR #2754)
* Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
* Add purge API features, see `docs/admin_api/purge_history_api.rst <docs/admin_api/purge_history_api.rst>`_ for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
* Add support for whitelisting 3PIDs that users can register. (PR #2813)
* Add ``/room/{id}/event/{id}`` API (PR #2766)
* Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
* Add ``federation_domain_whitelist`` option (PR #2820, #2821)


Changes:

* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
* Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
* Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
* No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
* Remove ``verbosity``/``log_file`` from generated config (PR #2755)
* Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
* When using synctl with workers, don't start the main synapse automatically (PR #2774)
* Minor performance improvements (PR #2773, #2792)
* Use a connection pool for non-federation outbound connections (PR #2817)
* Make it possible to run unit tests against postgres (PR #2829)
* Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
* Remove ability for AS users to call /events and /sync (PR #2948)
* Use bcrypt.checkpw (PR #2949) Thanks to @krombel!

Bug fixes:

* Fix broken ``ldap_config`` config option (PR #2683) Thanks to @seckrv!
* Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
* Fix publicised groups GET API (singular) over federation (PR #2772)
* Fix user directory when using ``user_directory_search_all_users`` config option (PR #2803, #2831)
* Fix error on ``/publicRooms`` when no rooms exist (PR #2827)
* Fix bug in quarantine_media (PR #2837)
* Fix url_previews when no Content-Type is returned from URL (PR #2845)
* Fix rare race in sync API when joining room (PR #2944)
* Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)


Changes in synapse v0.26.0 (2018-01-05)
=======================================

No changes since v0.26.0-rc1


Changes in synapse v0.26.0-rc1 (2017-12-13)
===========================================

Features:

* Add ability for ASes to publicise groups for their users (PR #2686)
* Add all local users to the user_directory and optionally search them (PR
  #2723)
* Add support for custom login types for validating users (PR #2729)


Changes:

* Update example Prometheus config to new format (PR #2648) Thanks to
  @krombel!
* Rename redact_content option to include_content in Push API (PR #2650)
* Declare support for r0.3.0 (PR #2677)
* Improve upserts (PR #2684, #2688, #2689, #2713)
* Improve documentation of workers (PR #2700)
* Improve tracebacks on exceptions (PR #2705)
* Allow guest access to group APIs for reading (PR #2715)
* Support for posting content in federation_client script (PR #2716)
* Delete devices and pushers on logouts etc (PR #2722)


Bug fixes:

* Fix database port script (PR #2673)
* Fix internal server error on login with ldap_auth_provider (PR #2678) Thanks
  to @jkolo!
* Fix error on sqlite 3.7 (PR #2697)
* Fix OPTIONS on preview_url (PR #2707)
* Fix error handling on dns lookup (PR #2711)
* Fix wrong avatars when inviting multiple users when creating room (PR #2717)
* Fix 500 when joining matrix-dev (PR #2719)


Changes in synapse v0.25.1 (2017-11-17)
=======================================

Bug fixes:

* Fix login with LDAP and other password provider modules (PR #2678). Thanks to
  @jkolo!


Changes in synapse v0.25.0 (2017-11-15)
=======================================

Bug fixes:

* Fix port script (PR #2673)


Changes in synapse v0.25.0-rc1 (2017-11-14)
===========================================

Features:

* Add is_public to groups table to allow for private groups (PR #2582)
* Add a route for determining who you are (PR #2668) Thanks to @turt2live!
* Add more features to the password providers (PR #2608, #2610, #2620, #2622,
  #2623, #2624, #2626, #2628, #2629)
* Add a hook for custom rest endpoints (PR #2627)
* Add API to update group room visibility (PR #2651)


Changes:

* Ignore <noscript> tags when generating URL preview descriptions (PR #2576)
  Thanks to @maximevaillancourt!
* Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to
  @krombel!
* Support /keys/upload on /r0 as well as /unstable (PR #2585)
* Front-end proxy: pass through auth header (PR #2586)
* Allow ASes to deactivate their own users (PR #2589)
* Remove refresh tokens (PR #2613)
* Automatically set default displayname on register (PR #2617)
* Log login requests (PR #2618)
* Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630)
* Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
* Fix various embarrassing typos around user_directory and add some doc. (PR
  #2643)
* Return whether a user is an admin within a group (PR #2647)
* Namespace visibility options for groups (PR #2657)
* Downcase UserIDs on registration (PR #2662)
* Cache failures when fetching URL previews (PR #2669)


Bug fixes:

* Fix port script (PR #2577)
* Fix error when running synapse with no logfile (PR #2581)
* Fix UI auth when deleting devices (PR #2591)
* Fix typo when checking if user is invited to group (PR #2599)
* Fix the port script to drop NUL values in all tables (PR #2611)
* Fix appservices being backlogged and not receiving new events due to a bug in
  notify_interested_services (PR #2631) Thanks to @xyzz!
* Fix updating rooms avatar/display name when modified by admin (PR #2636)
  Thanks to @farialima!
* Fix bug in state group storage (PR #2649)
* Fix 500 on invalid utf-8 in request (PR #2663)


Changes in synapse v0.24.1 (2017-10-24)
=======================================

Bug fixes:

* Fix updating group profiles over federation (PR #2567)


Changes in synapse v0.24.0 (2017-10-23)
=======================================

No changes since v0.24.0-rc1


Changes in synapse v0.24.0-rc1 (2017-10-19)
===========================================

Features:

* Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426,
  #2430, #2454, #2471, #2472, #2544)
* Add support for channel notifications (PR #2501)
* Add basic implementation of backup media store (PR #2538)
* Add config option to auto-join new users to rooms (PR #2545)


Changes:

* Make the spam checker a module (PR #2474)
* Delete expired url cache data (PR #2478)
* Ignore incoming events for rooms that we have left (PR #2490)
* Allow spam checker to reject invites too (PR #2492)
* Add room creation checks to spam checker (PR #2495)
* Spam checking: add the invitee to user_may_invite (PR #2502)
* Process events from federation for different rooms in parallel (PR #2520)
* Allow error strings from spam checker (PR #2531)
* Improve error handling for missing files in config (PR #2551)


Bug fixes:

* Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477)
* Fix incompatibility with newer versions of ujson (PR #2483) Thanks to
  @jeremycline!
* Fix notification keywords that start/end with non-word chars (PR #2500)
* Fix stack overflow and logcontexts from linearizer (PR #2532)
* Fix 500 error when fields missing from power_levels event (PR #2552)
* Fix 500 error when we get an error handling a PDU (PR #2553)


Changes in synapse v0.23.1 (2017-10-02)
=======================================

Changes:

* Make 'affinity' package optional, as it is not supported on some platforms


Changes in synapse v0.23.0 (2017-10-02)
=======================================
@@ -30,8 +30,12 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.

-We use Jenkins for continuous integration (http://matrix.org/jenkins), and
-typically all pull requests get automatically tested by Jenkins: if your change
-breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org so
-please lurk there and keep an eye open.
+We use `Jenkins <http://matrix.org/jenkins>`_ and
+`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
+integration. All pull requests to synapse get automatically tested by Travis;
+the Jenkins builds require an administrator to start them. If your change
+breaks the build, this will be shown in github, so please keep an eye on the
+pull request for feedback.

 Code style
 ~~~~~~~~~~
README.rst: 24 lines changed

@@ -354,6 +354,10 @@ https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one
 Fedora
 ------

+Synapse is in the Fedora repositories as ``matrix-synapse``::
+
+    sudo dnf install matrix-synapse
+
 Oleg Girko provides Fedora RPMs at
 https://obs.infoserver.lv/project/monitor/matrix-synapse

@@ -632,6 +636,11 @@ largest boxes pause for thought.)

 Troubleshooting
 ---------------

+You can use the federation tester to check if your homeserver is all set:
+``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``
+If any of the attributes under "checks" is false, federation won't work.
+
 The typical failure mode with federation is that when you try to join a room,
 it is rejected with "401: Unauthorized". Generally this means that other
 servers in the room couldn't access yours. (Joining a room over federation is a
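For example, a short script can pull that report and flag failing checks. This is a sketch only: the README above does not document the report's JSON layout, so the script assumes ``checks`` is a top-level object mapping check names to booleans, and ``example.com`` stands in for your server_name.

```
import requests

# Fetch the federation tester report for a homeserver (a sketch; the
# layout beyond a top-level "checks" object is an assumption).
report = requests.get(
    "https://matrix.org/federationtester/api/report",
    params={"server_name": "example.com"},  # substitute your server_name
).json()

failed = [name for name, ok in report.get("checks", {}).items() if ok is False]
if failed:
    print("federation won't work; failing checks: %s" % ", ".join(failed))
else:
    print("all checks passed")
```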
@@ -823,7 +832,9 @@ spidering 'internal' URLs on your network. At the very least we recommend that
 your loopback and RFC1918 IP addresses are blacklisted.

 This also requires the optional lxml and netaddr python dependencies to be
-installed.
+installed. This in turn requires the libxml2 library to be available - on
+Debian/Ubuntu this means ``apt-get install libxml2-dev``, or equivalent for
+your OS.


 Password reset

@@ -883,6 +894,17 @@ This should end with a 'PASSED' result::

    PASSED (successes=143)

+Running the Integration Tests
+=============================
+
+Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
+a Matrix homeserver integration testing suite, which uses HTTP requests to
+access the API as a Matrix client would. It is able to run Synapse directly from
+the source tree, so installation of the server is not required.
+
+Testing with SyTest is recommended for verifying that changes related to the
+Client-Server API are functioning correctly. See the `installation instructions
+<https://github.com/matrix-org/sytest#installing>`_ for details.
+
 Building Internal API Documentation
 ===================================
@@ -5,7 +5,8 @@ To use it, first install prometheus by following the instructions at

     http://prometheus.io/

-Then add a new job to the main prometheus.conf file:
+### for Prometheus v1
+Add a new job to the main prometheus.conf file:

     job: {
         name: "synapse"

@@ -15,6 +16,22 @@ Then add a new job to the main prometheus.conf file:
         }
     }

+### for Prometheus v2
+Add a new job to the main prometheus.yml file:
+
+    - job_name: "synapse"
+      metrics_path: "/_synapse/metrics"
+      # when endpoint uses https:
+      scheme: "https"
+
+      static_configs:
+        - targets: ['SERVER.LOCATION:PORT']
+
+To use `synapse.rules` add
+
+    rule_files:
+      - "/PATH/TO/synapse-v2.rules"
+
 Metrics are disabled by default when running synapse; they must be enabled
 with the 'enable-metrics' option, either in the synapse config file or as a
 command-line option.
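Before pointing prometheus at the endpoint, it can be worth confirming that synapse is actually serving metrics. A minimal sketch, assuming metrics are enabled as described above and substituting your own host and port for the SERVER.LOCATION:PORT placeholder from the example config:

```
import requests

# Fetch the raw prometheus exposition text from synapse's metrics path
# (the /_synapse/metrics path comes from the example config above).
resp = requests.get("https://SERVER.LOCATION:PORT/_synapse/metrics")
resp.raise_for_status()

# Show the first few lines of the scrape output.
print("\n".join(resp.text.splitlines()[:10]))
```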
contrib/prometheus/synapse-v2.rules: new file, 60 lines

@@ -0,0 +1,60 @@
groups:
- name: synapse
  rules:
  - record: "synapse_federation_transaction_queue_pendingEdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
  - record: "synapse_federation_transaction_queue_pendingPdus:total"
    expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
  - record: 'synapse_http_server_requests:method'
    labels:
      servlet: ""
    expr: "sum(synapse_http_server_requests) by (method)"
  - record: 'synapse_http_server_requests:servlet'
    labels:
      method: ""
    expr: 'sum(synapse_http_server_requests) by (servlet)'

  - record: 'synapse_http_server_requests:total'
    labels:
      servlet: ""
    expr: 'sum(synapse_http_server_requests:by_method) by (servlet)'

  - record: 'synapse_cache:hit_ratio_5m'
    expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
  - record: 'synapse_cache:hit_ratio_30s'
    expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

  - record: 'synapse_federation_client_sent'
    labels:
      type: "EDU"
    expr: 'synapse_federation_client_sent_edus + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "PDU"
    expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
  - record: 'synapse_federation_client_sent'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_client_sent_queries) by (job)'

  - record: 'synapse_federation_server_received'
    labels:
      type: "EDU"
    expr: 'synapse_federation_server_received_edus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "PDU"
    expr: 'synapse_federation_server_received_pdus + 0'
  - record: 'synapse_federation_server_received'
    labels:
      type: "Query"
    expr: 'sum(synapse_federation_server_received_queries) by (job)'

  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "EDU"
    expr: 'synapse_federation_transaction_queue_pending_edus + 0'
  - record: 'synapse_federation_transaction_queue_pending'
    labels:
      type: "PDU"
    expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
docs/admin_api/media_admin_api.md: new file, 23 lines

@@ -0,0 +1,23 @@
# List all media in a room

This API gets a list of known media in a room.

The API is:
```
GET /_matrix/client/r0/admin/room/<room_id>/media
```
including an `access_token` of a server admin.

It returns a JSON body like the following:
```
{
    "local": [
        "mxc://localhost/xwvutsrqponmlkjihgfedcba",
        "mxc://localhost/abcdefghijklmnopqrstuvwx"
    ],
    "remote": [
        "mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
        "mxc://matrix.org/abcdefghijklmnopqrstuvwx"
    ]
}
```
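For instance, the endpoint can be called with the `requests` library. This is a sketch: the server URL, room ID, and token are placeholders you must supply, and the `access_token` is passed as a query parameter as described above.

```
import requests

SERVER = "https://localhost:8448"   # your homeserver's client API
ROOM_ID = "!someroom:localhost"     # the room to inspect
TOKEN = "<server admin access_token>"

resp = requests.get(
    "%s/_matrix/client/r0/admin/room/%s/media" % (SERVER, ROOM_ID),
    params={"access_token": TOKEN},
)
resp.raise_for_status()

media = resp.json()
print("local media:", media["local"])
print("remote media:", media["remote"])
```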
@@ -8,8 +8,56 @@ Depending on the amount of history being purged a call to the API may take
 several minutes or longer. During this period users will not be able to
 paginate further back in the room from the point being purged from.

-The API is simply:
+The API is:

-``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
+``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``

 including an ``access_token`` of a server admin.

+By default, events sent by local users are not deleted, as they may represent
+the only copies of this content in existence. (Events sent by remote users are
+deleted.)
+
+Room state data (such as joins, leaves, topic) is always preserved.
+
+To delete local message events as well, set ``delete_local_events`` in the body:
+
+.. code:: json
+
+   {
+       "delete_local_events": true
+   }
+
+The caller must specify the point in the room to purge up to. This can be
+specified by including an event_id in the URI, or by setting a
+``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
+id is given, that event (and others at the same graph depth) will be retained.
+If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
+in milliseconds.
+
+The API starts the purge running, and returns immediately with a JSON body with
+a purge id:
+
+.. code:: json
+
+   {
+       "purge_id": "<opaque id>"
+   }
+
+Purge status query
+------------------
+
+It is possible to poll for updates on recent purges with a second API;
+
+``GET /_matrix/client/r0/admin/purge_history_status/<purge_id>``
+
+(again, with a suitable ``access_token``). This API returns a JSON body like
+the following:
+
+.. code:: json
+
+   {
+       "status": "active"
+   }
+
+The status will be one of ``active``, ``complete``, or ``failed``.
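Putting the two endpoints together, a purge can be started and then polled until it finishes. A minimal sketch, with the same placeholder conventions as the media example above and an arbitrary example timestamp:

```
import time

import requests

SERVER = "https://localhost:8448"
ROOM_ID = "!someroom:localhost"
TOKEN = "<server admin access_token>"

# Start the purge: delete history up to the given timestamp (ms since the
# unix epoch), including events sent by local users.
resp = requests.post(
    "%s/_matrix/client/r0/admin/purge_history/%s" % (SERVER, ROOM_ID),
    params={"access_token": TOKEN},
    json={"purge_up_to_ts": 1500000000000, "delete_local_events": True},
)
resp.raise_for_status()
purge_id = resp.json()["purge_id"]

# Poll the status endpoint until the purge completes or fails.
while True:
    status = requests.get(
        "%s/_matrix/client/r0/admin/purge_history_status/%s" % (SERVER, purge_id),
        params={"access_token": TOKEN},
    ).json()["status"]
    if status in ("complete", "failed"):
        break
    time.sleep(5)

print("purge finished with status:", status)
```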
@@ -1,26 +1,13 @@
 Basically, PEP8

 - Everything should comply with PEP8. Code should pass
   ``pep8 --max-line-length=100`` without any warnings.

-- NEVER tabs. 4 spaces to indent.
-- Max line width: 79 chars (with flexibility to overflow by a "few chars" if
-  the overflowing content is not semantically significant and avoids an
-  explosion of vertical whitespace).
-- Use camel case for class and type names
-- Use underscores for functions and variables.
-- Use double quotes.
-- Use parentheses instead of '\\' for line continuation where ever possible
-  (which is pretty much everywhere)
-- There should be max a single new line between:
-  - statements
-  - functions in a class
-- There should be two new lines between:
-  - definitions in a module (e.g., between different classes)
-- There should be spaces where spaces should be and not where there shouldn't be:
-  - a single space after a comma
-  - a single space before and after for '=' when used as assignment
-  - no spaces before and after for '=' for default values and keyword arguments.
-- Indenting must follow PEP8; either hanging indent or multiline-visual indent
-  depending on the size and shape of the arguments and what makes more sense to
-  the author. In other words, both this::
+- **Indenting**:
+
+  - NEVER tabs. 4 spaces to indent.
+
+  - follow PEP8; either hanging indent or multiline-visual indent depending
+    on the size and shape of the arguments and what makes more sense to the
+    author. In other words, both this::

     print("I am a fish %s" % "moo")

@@ -33,20 +20,100 @@

     print(
         "I am a fish %s" %
-        "moo"
+        "moo",
     )

-  ...are valid, although given each one takes up 2x more vertical space than
-  the previous, it's up to the author's discretion as to which layout makes most
-  sense for their function invocation. (e.g. if they want to add comments
-  per-argument, or put expressions in the arguments, or group related arguments
-  together, or want to deliberately extend or preserve vertical/horizontal
-  space)
+  ...are valid, although given each one takes up 2x more vertical space than
+  the previous, it's up to the author's discretion as to which layout makes
+  most sense for their function invocation. (e.g. if they want to add
+  comments per-argument, or put expressions in the arguments, or group
+  related arguments together, or want to deliberately extend or preserve
+  vertical/horizontal space)

-Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
-This is so that we can generate documentation with
-`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
-`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
-in the sphinx documentation.
+- **Line length**:
+
+  Code should pass pep8 --max-line-length=100 without any warnings.
+  Max line length is 79 chars (with flexibility to overflow by a "few chars" if
+  the overflowing content is not semantically significant and avoids an
+  explosion of vertical whitespace).
+
+  Use parentheses instead of ``\`` for line continuation where ever possible
+  (which is pretty much everywhere).
+
+- **Naming**:
+
+  - Use camel case for class and type names
+  - Use underscores for functions and variables.
+
+- Use double quotes ``"foo"`` rather than single quotes ``'foo'``.
+
+- **Blank lines**:
+
+  - There should be max a single new line between:
+
+    - statements
+    - functions in a class
+
+  - There should be two new lines between:
+
+    - definitions in a module (e.g., between different classes)
+
+- **Whitespace**:
+
+  There should be spaces where spaces should be and not where there shouldn't
+  be:
+
+  - a single space after a comma
+  - a single space before and after for '=' when used as assignment
+  - no spaces before and after for '=' for default values and keyword arguments.
+
+- **Comments**: should follow the `google code style
+  <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
+  This is so that we can generate documentation with `sphinx
+  <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
+  `examples
+  <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
+  in the sphinx documentation.
+
+- **Imports**:
+
+  - Prefer to import classes and functions than packages or modules.
+
+    Example::
+
+      from synapse.types import UserID
+      ...
+      user_id = UserID(local, server)
+
+    is preferred over::
+
+      from synapse import types
+      ...
+      user_id = types.UserID(local, server)
+
+    (or any other variant).
+
+    This goes against the advice in the Google style guide, but it means that
+    errors in the name are caught early (at import time).
+
+  - Multiple imports from the same package can be combined onto one line::
+
+      from synapse.types import GroupID, RoomID, UserID
+
+    An effort should be made to keep the individual imports in alphabetical
+    order.
+
+    If the list becomes long, wrap it with parentheses and split it over
+    multiple lines.
+
+  - As per `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_,
+    imports should be grouped in the following order, with a blank line between
+    each group:
+
+    1. standard library imports
+    2. related third party imports
+    3. local application/library specific imports
+
+  - Imports within each group should be sorted alphabetically by module name.
+
+  - Avoid wildcard imports (``from synapse.types import *``) and relative
+    imports (``from .types import UserID``).
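To illustrate the Comments rule above, here is a hypothetical function documented in the google style that sphinx-napoleon understands; the function and its behaviour are invented for the example.

```
def get_room_media(room_id, limit=100):
    """Fetches known media in a room.

    Args:
        room_id (str): The ID of the room to inspect.
        limit (int): The maximum number of entries to return.

    Returns:
        list[str]: ``mxc://`` URIs of media found in the room.

    Raises:
        ValueError: If ``room_id`` is empty.
    """
    if not room_id:
        raise ValueError("room_id must be non-empty")
    return []
```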
@@ -279,9 +279,9 @@ Obviously that option means that the operations done in
 that might be fixed by setting a different logcontext via a ``with
 LoggingContext(...)`` in ``background_operation``).

-The second option is to use ``logcontext.preserve_fn``, which wraps a function
-so that it doesn't reset the logcontext even when it returns an incomplete
-deferred, and adds a callback to the returned deferred to reset the
+The second option is to use ``logcontext.run_in_background``, which wraps a
+function so that it doesn't reset the logcontext even when it returns an
+incomplete deferred, and adds a callback to the returned deferred to reset the
 logcontext. In other words, it turns a function that follows the Synapse rules
 about logcontexts and Deferreds into one which behaves more like an external
 function — the opposite operation to that described in the previous section.

@@ -293,15 +293,11 @@ It can be used like this:

     def do_request_handling():
         yield foreground_operation()

-        logcontext.preserve_fn(background_operation)()
+        logcontext.run_in_background(background_operation)

         # this will now be logged against the request context
         logger.debug("Request handling complete")

-XXX: I think ``preserve_context_over_fn`` is supposed to do the first option,
-but the fact that it does ``preserve_context_over_deferred`` on its results
-means that its use is fraught with difficulty.
-
 Passing synapse deferreds into third-party functions
 ----------------------------------------------------
@@ -33,6 +33,53 @@ How to monitor Synapse metrics using Prometheus
|
||||
|
||||
Restart prometheus.
|
||||
|
||||
|
||||
Block and response metrics renamed for 0.27.0
|
||||
---------------------------------------------
|
||||
|
||||
Synapse 0.27.0 begins the process of rationalising the duplicate ``*:count``
|
||||
metrics reported for the resource tracking for code blocks and HTTP requests.
|
||||
|
||||
At the same time, the corresponding ``*:total`` metrics are being renamed, as
|
||||
the ``:total`` suffix no longer makes sense in the absence of a corresponding
|
||||
``:count`` metric.
|
||||
|
||||
To enable a graceful migration path, this release just adds new names for the
|
||||
metrics being renamed. A future release will remove the old ones.
|
||||
|
||||
The following table shows the new metrics, and the old metrics which they are
|
||||
replacing.
|
||||
|
||||
==================================================== ===================================================
|
||||
New name Old name
|
||||
==================================================== ===================================================
|
||||
synapse_util_metrics_block_count synapse_util_metrics_block_timer:count
|
||||
synapse_util_metrics_block_count synapse_util_metrics_block_ru_utime:count
|
||||
synapse_util_metrics_block_count synapse_util_metrics_block_ru_stime:count
|
||||
synapse_util_metrics_block_count synapse_util_metrics_block_db_txn_count:count
|
||||
synapse_util_metrics_block_count synapse_util_metrics_block_db_txn_duration:count
|
||||
|
||||
synapse_util_metrics_block_time_seconds synapse_util_metrics_block_timer:total
|
||||
synapse_util_metrics_block_ru_utime_seconds synapse_util_metrics_block_ru_utime:total
|
||||
synapse_util_metrics_block_ru_stime_seconds synapse_util_metrics_block_ru_stime:total
|
||||
synapse_util_metrics_block_db_txn_count synapse_util_metrics_block_db_txn_count:total
|
||||
synapse_util_metrics_block_db_txn_duration_seconds synapse_util_metrics_block_db_txn_duration:total
|
||||
|
||||
synapse_http_server_response_count synapse_http_server_requests
|
||||
synapse_http_server_response_count synapse_http_server_response_time:count
|
||||
synapse_http_server_response_count synapse_http_server_response_ru_utime:count
|
||||
synapse_http_server_response_count synapse_http_server_response_ru_stime:count
|
||||
synapse_http_server_response_count synapse_http_server_response_db_txn_count:count
|
||||
synapse_http_server_response_count synapse_http_server_response_db_txn_duration:count
|
||||
|
||||
synapse_http_server_response_time_seconds synapse_http_server_response_time:total
|
||||
synapse_http_server_response_ru_utime_seconds synapse_http_server_response_ru_utime:total
|
||||
synapse_http_server_response_ru_stime_seconds synapse_http_server_response_ru_stime:total
|
||||
synapse_http_server_response_db_txn_count synapse_http_server_response_db_txn_count:total
|
||||
synapse_http_server_response_db_txn_duration_seconds synapse_http_server_response_db_txn_duration:total
|
||||
==================================================== ===================================================
|
||||
|
||||
|
||||
Standard Metric Names
|
||||
---------------------
|
||||
|
||||
@@ -42,7 +89,7 @@ have been changed to seconds, from miliseconds.
|
||||
|
||||
================================== =============================
|
||||
New name Old name
|
||||
---------------------------------- -----------------------------
|
||||
================================== =============================
|
||||
process_cpu_user_seconds_total process_resource_utime / 1000
|
||||
process_cpu_system_seconds_total process_resource_stime / 1000
|
||||
process_open_fds (no 'type' label) process_fds
|
||||
@@ -52,7 +99,7 @@ The python-specific counts of garbage collector performance have been renamed.

=========================== ======================
New name                    Old name
--------------------------- ----------------------
=========================== ======================
python_gc_time              reactor_gc_time
python_gc_unreachable_total reactor_gc_unreachable
python_gc_counts            reactor_gc_counts

@@ -62,7 +109,7 @@ The twisted-specific reactor metrics have been renamed.

==================================== =====================
New name                             Old name
------------------------------------ ---------------------
==================================== =====================
python_twisted_reactor_pending_calls reactor_pending_calls
python_twisted_reactor_tick_time     reactor_tick_time
==================================== =====================

99 docs/password_auth_providers.rst Normal file
@@ -0,0 +1,99 @@
Password auth provider modules
==============================

Password auth providers offer a way for server administrators to integrate
their Synapse installation with an existing authentication system.

A password auth provider is a Python class which is dynamically loaded into
Synapse, and provides a number of methods by which it can integrate with the
authentication system.

This document serves as a reference for those looking to implement their own
password auth providers.

Required methods
----------------

Password auth provider classes must provide the following methods:

*class* ``SomeProvider.parse_config``\(*config*)

    This method is passed the ``config`` object for this module from the
    homeserver configuration file.

    It should perform any appropriate sanity checks on the provided
    configuration, and return an object which is then passed into ``__init__``.

*class* ``SomeProvider``\(*config*, *account_handler*)

    The constructor is passed the config object returned by ``parse_config``,
    and a ``synapse.module_api.ModuleApi`` object which allows the
    password provider to check if accounts exist and/or create new ones.

Optional methods
----------------

Password auth provider classes may optionally provide the following methods.

*class* ``SomeProvider.get_db_schema_files``\()

    This method, if implemented, should return an Iterable of ``(name,
    stream)`` pairs of database schema files. Each file is applied in turn at
    initialisation, and a record is then made in the database so that it is
    not re-applied on the next start.
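
    For illustration only (the file name and layout here are invented), a
    provider shipping a single schema file alongside its module might
    implement this as::

        import os

        class SomeProvider(object):
            @staticmethod
            def get_db_schema_files():
                # serve one SQL file that lives next to this module
                schema = os.path.join(os.path.dirname(__file__), "schema.sql")
                return [("someprovider_schema.sql", open(schema))]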

``someprovider.get_supported_login_types``\()

    This method, if implemented, should return a ``dict`` mapping from a login
    type identifier (such as ``m.login.password``) to an iterable giving the
    fields which must be provided by the user in the submission to the
    ``/login`` API. These fields are passed in the ``login_dict`` dictionary
    to ``check_auth``.

    For example, if a password auth provider wants to implement a custom login
    type of ``com.example.custom_login``, where the client is expected to pass
    the fields ``secret1`` and ``secret2``, the provider should implement this
    method and return the following dict::

        {"com.example.custom_login": ("secret1", "secret2")}

``someprovider.check_auth``\(*username*, *login_type*, *login_dict*)

    This method is the one that does the real work. If implemented, it will be
    called for each login attempt where the login type matches one of the keys
    returned by ``get_supported_login_types``.

    It is passed the (possibly UNqualified) ``user`` provided by the client,
    the login type, and a dictionary of login secrets passed by the client.

    The method should return a Twisted ``Deferred`` object, which resolves to
    the canonical ``@localpart:domain`` user id if authentication is successful,
    and ``None`` if not.

    Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in
    which case the second field is a callback which will be called with the
    result from the ``/login`` call (including ``access_token``, ``device_id``,
    etc.)
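
    A sketch of ``check_auth`` for the hypothetical
    ``com.example.custom_login`` type above; ``verify_secrets`` stands in for
    whatever call the provider's real backend offers, and ``self.server_name``
    is assumed to have been stashed away by ``__init__``::

        from twisted.internet import defer

        @defer.inlineCallbacks
        def check_auth(self, username, login_type, login_dict):
            ok = yield verify_secrets(
                login_dict["secret1"], login_dict["secret2"],
            )
            if not ok:
                defer.returnValue(None)
            # qualify the user id if the client sent a bare localpart
            if not username.startswith("@"):
                username = "@%s:%s" % (username, self.server_name)
            defer.returnValue(username)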

``someprovider.check_password``\(*user_id*, *password*)

    This method provides a simpler interface than ``get_supported_login_types``
    and ``check_auth`` for password auth providers that just want to provide a
    mechanism for validating ``m.login.password`` logins.

    If implemented, it will be called to check logins with an
    ``m.login.password`` login type. It is passed a qualified
    ``@localpart:domain`` user id, and the password provided by the user.

    The method should return a Twisted ``Deferred`` object, which resolves to
    ``True`` if authentication is successful, and ``False`` if not.

``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*)

    This method, if implemented, is called when a user logs out. It is passed
    the qualified user ID, the ID of the deactivated device (if any: access
    tokens are occasionally created without an associated device ID), and the
    (now deactivated) access token.

    It may return a Twisted ``Deferred`` object; the logout request will wait
    for the deferred to complete but the result is ignored.
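
Putting the pieces together, a minimal provider that checks passwords against
a hard-coded dict might look like the following. This is an illustration
only (all names here are invented); a real provider would consult LDAP, a
REST service, or similar::

    from twisted.internet import defer

    class DictPasswordProvider(object):
        @staticmethod
        def parse_config(config):
            # e.g. {"@alice:example.com": "wonderland"} in homeserver.yaml
            return config.get("users", {})

        def __init__(self, config, account_handler):
            self.account_handler = account_handler
            self.users = config  # the dict returned by parse_config

        def check_password(self, user_id, password):
            # user_id is always fully qualified (@localpart:domain) here
            return defer.succeed(self.users.get(user_id) == password)

        def on_logged_out(self, user_id, device_id, access_token):
            return defer.succeed(None)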

@@ -50,7 +50,7 @@ master_doc = 'index'

# General information about the project.
project = u'Synapse'
copyright = u'2014, TNG'
copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the

@@ -56,6 +56,7 @@ As a first cut, let's do #2 and have the receiver hit the API to calculate its o

API
---

```
GET /_matrix/media/r0/preview_url?url=http://wherever.com
200 OK
{
@@ -66,6 +67,7 @@ GET /_matrix/media/r0/preview_url?url=http://wherever.com
    "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance & bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
    "og:site_name" : "Twitter"
}
```

* Downloads the URL
* If HTML, just stores it in RAM and parses it for OG meta tags
17 docs/user_directory.md Normal file
@@ -0,0 +1,17 @@
User Directory API Implementation
=================================

The user directory is currently maintained based on the 'visible' users
on this particular server - i.e. ones which your account shares a room with, or
who are present in a publicly viewable room present on the server.

The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
quickest solution to fix it is:

```
UPDATE user_directory_stream_pos SET stream_id = NULL;
```

and restart the synapse, which should then start a background task to
flush the current tables and regenerate the directory.
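
A sketch of the same reset driven from Python, assuming a postgres-backed
synapse and the `psycopg2` driver (the DSN here is an invented example):

```
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse")
with conn, conn.cursor() as cur:
    # NULLing the stream position makes synapse rebuild the directory
    cur.execute("UPDATE user_directory_stream_pos SET stream_id = NULL")
conn.close()
# ...then restart synapse to kick off the regeneration task.
```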

190 docs/workers.rst
@@ -1,11 +1,15 @@
Scaling synapse via workers
---------------------------
===========================

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

All of the below is highly experimental and subject to change as Synapse evolves,
but documenting it here to help folks needing highly scalable Synapses similar
to the one running matrix.org!

All processes continue to share the same database instance, and as such, workers
only work with postgres based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using

@@ -16,37 +20,57 @@ TCP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

To enable workers, you need to add a replication listener to the master synapse, e.g.::
Configuration
-------------

To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. Note that this includes
requests made to the federation port. The caveats regarding running a
reverse-proxy on the federation port still apply (see
https://github.com/matrix-org/synapse/blob/master/README.rst#reverse-proxying-the-federation-port).

To enable workers, you need to add two replication listeners to the master
synapse, e.g.::

    listeners:
      # The TCP replication port
      - port: 9092
        bind_address: '127.0.0.1'
        type: replication
      # The HTTP replication port
      - port: 9093
        bind_address: '127.0.0.1'
        type: http
        resources:
          - names: [replication]

Under **no circumstances** should this replication API listener be exposed to the
public internet; it currently implements no authentication whatsoever and is
Under **no circumstances** should these replication API listeners be exposed to
the public internet; it currently implements no authentication whatsoever and is
unencrypted.

You then create a set of configs for the various worker processes. These should be
worker configuration files should be stored in a dedicated subdirectory, to allow
synctl to manipulate them.
(Roughly, the TCP port is used for streaming data from the master to the
workers, and the HTTP port for the workers to send data to the main
synapse process.)

The current available worker applications are:
* synapse.app.pusher - handles sending push notifications to sygnal and email
* synapse.app.synchrotron - handles /sync endpoints. can scale horizontally through multiple instances.
* synapse.app.appservice - handles output traffic to Application Services
* synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
* synapse.app.media_repository - handles the media repository.
* synapse.app.client_reader - handles client API endpoints like /publicRooms
You then create a set of configs for the various worker processes. These
should be worker configuration files, and should be stored in a dedicated
subdirectory, to allow synctl to manipulate them.

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.

You must specify the type of worker application (worker_app) and the replication
endpoint that it's talking to on the main synapse process (worker_replication_host
and worker_replication_port).
You must specify the type of worker application (``worker_app``). The currently
available worker applications are listed below. You must also specify the
replication endpoints that it's talking to on the main synapse process.
``worker_replication_host`` should specify the host of the main synapse,
``worker_replication_port`` should point to the TCP replication listener port and
``worker_replication_http_port`` should point to the HTTP replication port.

Currently, only the ``event_creator`` worker requires specifying
``worker_replication_http_port``.

For instance::

@@ -55,6 +79,7 @@ For instance::

    # The replication listener on the synapse to talk to.
    worker_replication_host: 127.0.0.1
    worker_replication_port: 9092
    worker_replication_http_port: 9093

    worker_listeners:
     - type: http

@@ -68,11 +93,11 @@ For instance::

    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
plain HTTP ``/sync`` endpoint on port 8083 separately from the ``/sync`` endpoint provided
by the main synapse.

Obviously you should configure your loadbalancer to route the /sync endpoint to
the synchrotron instance(s) in this instance.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (``localhost:8083`` in the above example).

Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found

@@ -89,6 +114,125 @@ To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

All of the above is highly experimental and subject to change as Synapse evolves,
but documenting it here to help folks needing highly scalable Synapses similar
to the one running matrix.org!

Available worker applications
-----------------------------

``synapse.app.pusher``
~~~~~~~~~~~~~~~~~~~~~~

Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set ``start_pushers: False`` in the
shared configuration file to stop the main synapse sending these notifications.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.synchrotron``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

The synchrotron handles ``sync`` requests from clients. In particular, it can
handle REST endpoints matching the following regular expressions::

    ^/_matrix/client/(v2_alpha|r0)/sync$
    ^/_matrix/client/(api/v1|v2_alpha|r0)/events$
    ^/_matrix/client/(api/v1|r0)/initialSync$
    ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$

The above endpoints should all be routed to the synchrotron worker by the
reverse-proxy configuration.

It is possible to run multiple instances of the synchrotron to scale
horizontally. In this case the reverse-proxy should be configured to
load-balance across the instances, though it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting
a userid from the access token is currently left as an exercise for the reader.
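
One simple approach, sketched here as Python rather than as proxy
configuration, is to hash the client's access token (taken from the
``Authorization`` header or the ``access_token`` query parameter) and pick a
synchrotron from a fixed list; the backend addresses are invented for the
example::

    import hashlib

    BACKENDS = ["127.0.0.1:8083", "127.0.0.1:8084"]

    def pick_backend(access_token):
        # same token -> same backend, so a user's /sync requests stick
        h = int(hashlib.sha256(access_token.encode("utf-8")).hexdigest(), 16)
        return BACKENDS[h % len(BACKENDS)]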

``synapse.app.appservice``
~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles sending output traffic to Application Services. Doesn't handle any
REST endpoints itself, but you should set ``notify_appservices: False`` in the
shared configuration file to stop the main synapse sending these notifications.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.federation_reader``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles a subset of federation endpoints. In particular, it can handle REST
endpoints matching the following regular expressions::

    ^/_matrix/federation/v1/event/
    ^/_matrix/federation/v1/state/
    ^/_matrix/federation/v1/state_ids/
    ^/_matrix/federation/v1/backfill/
    ^/_matrix/federation/v1/get_missing_events/
    ^/_matrix/federation/v1/publicRooms

The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.

``synapse.app.federation_sender``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set ``send_federation: False`` in the
shared configuration file to stop the main synapse sending this traffic.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.media_repository``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles the media repository. It can handle all endpoints starting with::

    /_matrix/media/

You should also set ``enable_media_repo: False`` in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository.

Note this worker cannot be load-balanced: only one instance should be active.

``synapse.app.client_reader``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles client API endpoints. It can handle REST endpoints matching the
following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$

``synapse.app.user_dir``
~~~~~~~~~~~~~~~~~~~~~~~~

Handles searches in the user directory. It can handle REST endpoints matching
the following regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$

``synapse.app.frontend_proxy``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. It can handle REST endpoints matching the following
regular expressions::

    ^/_matrix/client/(api/v1|r0|unstable)/keys/upload

It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
file. For example::

    worker_main_http_uri: http://127.0.0.1:8008

``synapse.app.event_creator``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Handles non-state event creation. It can handle REST endpoints matching::

    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send

It will create events locally and then send them on to the main synapse
instance to be persisted and handled.

@@ -123,15 +123,25 @@ def lookup(destination, path):
    except:
        return "https://%s:%d%s" % (destination, 8448, path)

def get_json(origin_name, origin_key, destination, path):
    request_json = {
        "method": "GET",

def request_json(method, origin_name, origin_key, destination, path, content):
    if method is None:
        if content is None:
            method = "GET"
        else:
            method = "POST"

    json_to_sign = {
        "method": method,
        "uri": path,
        "origin": origin_name,
        "destination": destination,
    }

    signed_json = sign_json(request_json, origin_key, origin_name)
    if content is not None:
        json_to_sign["content"] = json.loads(content)

    signed_json = sign_json(json_to_sign, origin_key, origin_name)

    authorization_headers = []

@@ -145,10 +155,12 @@ def get_json(origin_name, origin_key, destination, path):
    dest = lookup(destination, path)
    print ("Requesting %s" % dest, file=sys.stderr)

    result = requests.get(
        dest,
    result = requests.request(
        method=method,
        url=dest,
        headers={"Authorization": authorization_headers[0]},
        verify=False,
        data=content,
    )
    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
    return result.json()

@@ -186,6 +198,17 @@ def main():
        "connect appropriately.",
    )

    parser.add_argument(
        "-X", "--method",
        help="HTTP method to use for the request. Defaults to GET if --body is "
             "unspecified, POST if it is."
    )

    parser.add_argument(
        "--body",
        help="Data to send as the body of the HTTP request"
    )

    parser.add_argument(
        "path",
        help="request path. We will add '/_matrix/federation/v1/' to this."
@@ -199,8 +222,11 @@ def main():
    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]

    result = get_json(
        args.server_name, key, args.destination, "/_matrix/federation/v1/" + args.path
    result = request_json(
        args.method,
        args.server_name, key, args.destination,
        "/_matrix/federation/v1/" + args.path,
        content=args.body,
    )

    json.dump(result, sys.stdout)

133 scripts/move_remote_media_to_new_store.py Executable file
@@ -0,0 +1,133 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Moves a list of remote media from one media store to another.

The input should be a list of media files to be moved, one per line. Each line
should be formatted::

    <origin server>|<file id>

This can be extracted from postgres with::

    psql --tuples-only -A -c "select media_origin, filesystem_id from
        matrix.remote_media_cache where ..."

To use, pipe the above into::

    PYTHONPATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""

from __future__ import print_function

import argparse
import logging

import sys

import os

import shutil

from synapse.rest.media.v1.filepath import MediaFilePaths

logger = logging.getLogger()


def main(src_repo, dest_repo):
    src_paths = MediaFilePaths(src_repo)
    dest_paths = MediaFilePaths(dest_repo)
    for line in sys.stdin:
        line = line.strip()
        parts = line.split('|')
        if len(parts) != 2:
            print("Unable to parse input line %s" % line, file=sys.stderr)
            exit(1)

        move_media(parts[0], parts[1], src_paths, dest_paths)


def move_media(origin_server, file_id, src_paths, dest_paths):
    """Move the given file, and any thumbnails, to the dest repo

    Args:
        origin_server (str):
        file_id (str):
        src_paths (MediaFilePaths):
        dest_paths (MediaFilePaths):
    """
    logger.info("%s/%s", origin_server, file_id)

    # check that the original exists
    original_file = src_paths.remote_media_filepath(origin_server, file_id)
    if not os.path.exists(original_file):
        logger.warn(
            "Original for %s/%s (%s) does not exist",
            origin_server, file_id, original_file,
        )
    else:
        mkdir_and_move(
            original_file,
            dest_paths.remote_media_filepath(origin_server, file_id),
        )

    # now look for thumbnails
    original_thumb_dir = src_paths.remote_media_thumbnail_dir(
        origin_server, file_id,
    )
    if not os.path.exists(original_thumb_dir):
        return

    mkdir_and_move(
        original_thumb_dir,
        dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
    )


def mkdir_and_move(original_file, dest_file):
    dirname = os.path.dirname(dest_file)
    if not os.path.exists(dirname):
        logger.debug("mkdir %s", dirname)
        os.makedirs(dirname)
    logger.debug("mv %s %s", original_file, dest_file)
    shutil.move(original_file, dest_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-v", action='store_true', help='enable debug logging')
    parser.add_argument(
        "src_repo",
        help="Path to source content repo",
    )
    parser.add_argument(
        "dest_repo",
        help="Path to destination content repo",
    )
    args = parser.parse_args()

    logging_config = {
        "level": logging.DEBUG if args.v else logging.INFO,
        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    }
    logging.basicConfig(**logging_config)

    main(args.src_repo, args.dest_repo)

@@ -42,6 +42,14 @@ BOOLEAN_COLUMNS = {
    "public_room_list_stream": ["visibility"],
    "device_lists_outbound_pokes": ["sent"],
    "users_who_share_rooms": ["share_private"],
    "groups": ["is_public"],
    "group_rooms": ["is_public"],
    "group_users": ["is_public", "is_admin"],
    "group_summary_rooms": ["is_public"],
    "group_room_categories": ["is_public"],
    "group_summary_users": ["is_public"],
    "group_roles": ["is_public"],
    "local_group_membership": ["is_publicised", "is_admin"],
}


@@ -112,6 +120,7 @@ class Store(object):

    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
    _simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]

    def runInteraction(self, desc, func, *args, **kwargs):
        def r(conn):

@@ -318,7 +327,7 @@ class Porter(object):
            backward_chunk = min(row[0] for row in brows) - 1

            rows = frows + brows
            self._convert_rows(table, headers, rows)
            rows = self._convert_rows(table, headers, rows)

            def insert(txn):
                self.postgres_store.insert_many_txn(

@@ -376,10 +385,13 @@ class Porter(object):
                " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
            )

            rows_dict = [
                dict(zip(headers, row))
                for row in rows
            ]
            rows_dict = []
            for row in rows:
                d = dict(zip(headers, row))
                if "\0" in d['value']:
                    logger.warn('dropping search row %s', d)
                else:
                    rows_dict.append(d)

            txn.executemany(sql, [
                (

@@ -551,17 +563,29 @@ class Porter(object):
            i for i, h in enumerate(headers) if h in bool_col_names
        ]

        class BadValueException(Exception):
            pass

        def conv(j, col):
            if j in bool_cols:
                return bool(col)
            elif isinstance(col, basestring) and "\0" in col:
                logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
                raise BadValueException()
            return col

        outrows = []
        for i, row in enumerate(rows):
            rows[i] = tuple(
            try:
                outrows.append(tuple(
                    conv(j, col)
                    for j, col in enumerate(row)
                    if j > 0
                )
                ))
            except BadValueException:
                pass

        return outrows

    @defer.inlineCallbacks
    def _setup_sent_transactions(self):

@@ -589,7 +613,7 @@ class Porter(object):
            "select", r,
        )

        self._convert_rows("sent_transactions", headers, rows)
        rows = self._convert_rows("sent_transactions", headers, rows)

        inserted_rows = len(rows)
        if inserted_rows:

45 scripts/sync_room_to_group.pl Executable file
@@ -0,0 +1,45 @@
#!/usr/bin/env perl

use strict;
use warnings;

use JSON::XS;
use LWP::UserAgent;
use URI::Escape;

if (@ARGV < 4) {
    die "usage: $0 <homeserver url> <access_token> <room_id|room_alias> <group_id>\n";
}

my ($hs, $access_token, $room_id, $group_id) = @ARGV;
my $ua = LWP::UserAgent->new();
$ua->timeout(10);

if ($room_id =~ /^#/) {
    $room_id = uri_escape($room_id);
    $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id};
}

my $room_users  = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ];
my $group_users = [
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}),
    (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}),
];

die "refusing to sync from empty room" unless (@$room_users);
die "refusing to sync to empty group" unless (@$group_users);

my $diff = {};
foreach my $user (@$room_users) { $diff->{$user}++ }
foreach my $user (@$group_users) { $diff->{$user}-- }

foreach my $user (keys %$diff) {
    if ($diff->{$user} == 1) {
        warn "inviting $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
    elsif ($diff->{$user} == -1) {
        warn "removing $user";
        print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n";
    }
}

@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.23.0"
__version__ = "0.27.2"

@@ -270,7 +270,11 @@ class Auth(object):
            rights (str): The operation being performed; the access token must
                allow this.
        Returns:
            dict : dict that includes the user and the ID of their access token.
            Deferred[dict]: dict that includes:
                `user` (UserID)
                `is_guest` (bool)
                `token_id` (int|None): access token id. May be None if guest
                `device_id` (str|None): device corresponding to access token
        Raises:
            AuthError if no user by that token exists or the token is invalid.
        """

@@ -46,6 +46,7 @@ class Codes(object):
    THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
    THREEPID_IN_USE = "M_THREEPID_IN_USE"
    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
    THREEPID_DENIED = "M_THREEPID_DENIED"
    INVALID_USERNAME = "M_INVALID_USERNAME"
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"

@@ -140,6 +141,48 @@ class RegistrationError(SynapseError):
    pass


class FederationDeniedError(SynapseError):
    """An error raised when the server tries to federate with a server which
    is not on its federation whitelist.

    Attributes:
        destination (str): The destination which has been denied
    """

    def __init__(self, destination):
        """Raised by federation client or server to indicate that we are
        deliberately not attempting to contact a given server because it is
        not on our federation whitelist.

        Args:
            destination (str): the domain in question
        """

        self.destination = destination

        super(FederationDeniedError, self).__init__(
            code=403,
            msg="Federation denied with %s." % (self.destination,),
            errcode=Codes.FORBIDDEN,
        )


class InteractiveAuthIncompleteError(Exception):
    """An error raised when UI auth is not yet complete

    (This indicates we should return a 401 with 'result' as the body)

    Attributes:
        result (dict): the server response to the request, which should be
            passed back to the client
    """
    def __init__(self, result):
        super(InteractiveAuthIncompleteError, self).__init__(
            "Interactive auth not yet complete",
        )
        self.result = result


class UnrecognizedRequestError(SynapseError):
    """An error indicating we don't understand the request you're trying to make"""
    def __init__(self, *args, **kwargs):

@@ -17,7 +17,7 @@ from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, RoomID
from twisted.internet import defer

import ujson as json
import simplejson as json
import jsonschema
from jsonschema import FormatChecker

@@ -12,14 +12,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import logging
import sys

try:
    import affinity
except Exception:
    affinity = None

import affinity
from daemonize import Daemonize
from synapse.util import PreserveLoggingContext
from synapse.util.rlimit import change_resource_limit
from twisted.internet import reactor
from twisted.internet import error, reactor

logger = logging.getLogger(__name__)


def start_worker_reactor(appname, config):

@@ -78,6 +86,13 @@ def start_reactor(
        with PreserveLoggingContext():
            logger.info("Running")
            if cpu_affinity is not None:
                if not affinity:
                    quit_with_error(
                        "Missing package 'affinity' required for cpu_affinity\n"
                        "option\n\n"
                        "Install by running:\n\n"
                        "    pip install affinity\n\n"
                    )
                logger.info("Setting CPU affinity to %s" % cpu_affinity)
                affinity.set_process_affinity_mask(0, cpu_affinity)
            change_resource_limit(soft_file_limit)

@@ -97,3 +112,67 @@ def start_reactor(
            daemon.start()
        else:
            run()


def quit_with_error(error_string):
    message_lines = error_string.split("\n")
    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
    sys.stderr.write("*" * line_length + '\n')
    for line in message_lines:
        sys.stderr.write(" %s\n" % (line.rstrip(),))
    sys.stderr.write("*" * line_length + '\n')
    sys.exit(1)


def listen_tcp(bind_addresses, port, factory, backlog=50):
    """
    Create a TCP socket for a port and several addresses
    """
    for address in bind_addresses:
        try:
            reactor.listenTCP(
                port,
                factory,
                backlog,
                address
            )
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)


def listen_ssl(bind_addresses, port, factory, context_factory, backlog=50):
    """
    Create an SSL socket for a port and several addresses
    """
    for address in bind_addresses:
        try:
            reactor.listenSSL(
                port,
                factory,
                context_factory,
                backlog,
                address
            )
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)


def check_bind_error(e, address, bind_addresses):
    """
    This method checks an exception that occurred while binding on 0.0.0.0.
    If :: is specified in the bind addresses a warning is shown.
    The exception is still raised otherwise.

    Binding on both 0.0.0.0 and :: causes an exception on Linux and macOS
    because :: binds on both IPv4 and IPv6 (as per RFC 3493).
    When binding on 0.0.0.0 after :: this can safely be ignored.

    Args:
        e (Exception): Exception that was caught.
        address (str): Address on which binding was attempted.
        bind_addresses (list): Addresses on which the service listens.
    """
    if address == '0.0.0.0' and '::' in bind_addresses:
        logger.warn('Failed to listen on 0.0.0.0, continuing because listening on [::]')
    else:
        raise e

@@ -36,7 +36,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.appservice")

@@ -49,19 +49,6 @@ class AppserviceSlaveStore(


class AppserviceServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)

@@ -77,18 +64,17 @@ class AppserviceServer(HomeServer):
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())
        root_resource = create_resource_tree(resources, NoResource())

        for address in bind_addresses:
            reactor.listenTCP(
        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
                interface=address
            )
        )

        logger.info("Synapse appservice now listening on port %d", port)

@@ -98,17 +84,14 @@ class AppserviceServer(HomeServer):
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                        interface=address
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.client_reader")

@@ -64,19 +64,6 @@ class ClientReaderSlavedStore(


class ClientReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)

@@ -101,18 +88,17 @@ class ClientReaderServer(HomeServer):
                    "/_matrix/client/api/v1": resource,
                })

        root_resource = create_resource_tree(resources, Resource())
        root_resource = create_resource_tree(resources, NoResource())

        for address in bind_addresses:
            reactor.listenTCP(
        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
                interface=address
            )
        )

        logger.info("Synapse client reader now listening on port %d", port)

@@ -122,18 +108,16 @@ class ClientReaderServer(HomeServer):
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                        interface=address
                    )
                )

            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -172,7 +156,6 @@ def start(config_options):
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():

189 synapse/app/event_creator.py Normal file
@@ -0,0 +1,189 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import (
    RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
    JoinRoomAliasServlet,
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.event_creator")


class EventCreatorSlavedStore(
    DirectoryStore,
    TransactionStore,
    SlavedProfileStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
    SlavedReceiptsStore,
    SlavedPushRuleStore,
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedEventStore,
    SlavedRegistrationStore,
    RoomStore,
    BaseSlavedStore,
):
    pass


class EventCreatorServer(HomeServer):
    def setup(self):
        logger.info("Setting up.")
        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    RoomSendEventRestServlet(self).register(resource)
                    RoomMembershipRestServlet(self).register(resource)
                    RoomStateEventRestServlet(self).register(resource)
                    JoinRoomAliasServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            )
        )

        logger.info("Synapse event creator now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse event creator", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.event_creator"

    assert config.worker_replication_http_port is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = EventCreatorServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-event-creator", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
@@ -41,7 +41,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.federation_reader")

@@ -58,19 +58,6 @@ class FederationReaderSlavedStore(


class FederationReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)

@@ -90,18 +77,17 @@ class FederationReaderServer(HomeServer):
                    FEDERATION_PREFIX: TransportLayerServer(self),
                })

        root_resource = create_resource_tree(resources, Resource())
        root_resource = create_resource_tree(resources, NoResource())

        for address in bind_addresses:
            reactor.listenTCP(
        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
                interface=address
            )
        )

        logger.info("Synapse federation reader now listening on port %d", port)

@@ -111,17 +97,14 @@ class FederationReaderServer(HomeServer):
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                        interface=address
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -161,7 +144,6 @@ def start(config_options):
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():

@@ -42,7 +42,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.federation_sender")

@@ -76,19 +76,6 @@ class FederationSenderSlaveStore(


class FederationSenderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)

@@ -104,18 +91,17 @@ class FederationSenderServer(HomeServer):
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())
        root_resource = create_resource_tree(resources, NoResource())

        for address in bind_addresses:
            reactor.listenTCP(
        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
                interface=address
            )
        )

        logger.info("Synapse federation_sender now listening on port %d", port)

@@ -125,17 +111,14 @@ class FederationSenderServer(HomeServer):
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                _base.listen_tcp(
                    listener["bind_addresses"],
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                        interface=address
                    )
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -44,14 +44,13 @@ from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.web.resource import Resource
|
||||
from twisted.web.resource import NoResource
|
||||
|
||||
logger = logging.getLogger("synapse.app.frontend_proxy")
|
||||
|
||||
|
||||
class KeyUploadServlet(RestServlet):
|
||||
PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$",
|
||||
releases=())
|
||||
PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
|
||||
|
||||
def __init__(self, hs):
|
||||
"""
|
||||
@@ -89,9 +88,16 @@ class KeyUploadServlet(RestServlet):
|
||||
|
||||
if body:
|
||||
# They're actually trying to upload something, proxy to main synapse.
|
||||
# Pass through the auth headers, if any, in case the access token
|
||||
# is there.
|
||||
auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
|
||||
headers = {
|
||||
"Authorization": auth_headers,
|
||||
}
|
||||
result = yield self.http_client.post_json_get_json(
|
||||
self.main_uri + request.uri,
|
||||
body,
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
defer.returnValue((200, result))

@@ -112,19 +118,6 @@ class FrontendProxySlavedStore(


 class FrontendProxyServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)

@@ -149,18 +142,17 @@ class FrontendProxyServer(HomeServer):
             "/_matrix/client/api/v1": resource,
         })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
             ),
-                interface=address
-            )
+        )

         logger.info("Synapse client reader now listening on port %d", port)

@@ -170,17 +162,14 @@ class FrontendProxyServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            bind_addresses = listener["bind_addresses"]
-
-            for address in bind_addresses:
-                reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
                 ),
-                    interface=address
-                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -222,7 +211,6 @@ def start(config_options):
     )

     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)

     def start():

@@ -25,22 +25,25 @@ from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
     LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
     STATIC_PREFIX, WEB_CLIENT_PREFIX
 from synapse.app import _base
+from synapse.app._base import quit_with_error, listen_ssl, listen_tcp
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
 from synapse.federation.transport.server import TransportLayerServer
+from synapse.module_api import ModuleApi
+from synapse.http.additional_resource import AdditionalResource
 from synapse.http.server import RootRedirect
 from synapse.http.site import SynapseSite
 from synapse.metrics import register_memory_metrics
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
     check_requirements
+from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX
 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
 from synapse.rest import ClientRestResource
 from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.rest.media.v1.media_repository import MediaRepositoryResource
 from synapse.server import HomeServer
 from synapse.storage import are_all_users_on_domain
 from synapse.storage.engines import IncorrectDatabaseSetup, create_engine

@@ -48,11 +51,12 @@ from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_d
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
+from synapse.util.module_loader import load_module
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
 from twisted.application import service
 from twisted.internet import defer, reactor
-from twisted.web.resource import EncodingResourceWrapper, Resource
+from twisted.web.resource import EncodingResourceWrapper, NoResource
 from twisted.web.server import GzipEncoderFactory
 from twisted.web.static import File

@@ -106,9 +110,67 @@ class SynapseHomeServer(HomeServer):
         resources = {}
         for res in listener_config["resources"]:
             for name in res["names"]:
+                resources.update(self._configure_named_resource(
+                    name, res.get("compress", False),
+                ))
+
+        additional_resources = listener_config.get("additional_resources", {})
+        logger.debug("Configuring additional resources: %r",
+                     additional_resources)
+        module_api = ModuleApi(self, self.get_auth_handler())
+        for path, resmodule in additional_resources.items():
+            handler_cls, config = load_module(resmodule)
+            handler = handler_cls(config, module_api)
+            resources[path] = AdditionalResource(self, handler.handle_request)
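
Note: additional_resources lets a deployment mount custom HTTP handlers from its config. A minimal sketch of a module that could be loaded this way (the class and module names are hypothetical, not part of synapse; the exact handle_request contract is assumed from the wrapper above):

    class CustomRequestHandler(object):
        def __init__(self, config, module_api):
            self.config = config
            self.module_api = module_api

        def handle_request(self, request):
            # AdditionalResource invokes this callable for each request to
            # the mounted path (assumed to receive the twisted request).
            request.setHeader(b"Content-Type", b"application/json")
            return b"{}"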
+
+        if WEB_CLIENT_PREFIX in resources:
+            root_resource = RootRedirect(WEB_CLIENT_PREFIX)
+        else:
+            root_resource = NoResource()
+
+        root_resource = create_resource_tree(resources, root_resource)
+
+        if tls:
+            listen_ssl(
+                bind_addresses,
+                port,
+                SynapseSite(
+                    "synapse.access.https.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                ),
+                self.tls_server_context_factory,
+            )
+        else:
+            listen_tcp(
+                bind_addresses,
+                port,
+                SynapseSite(
+                    "synapse.access.http.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                )
+            )
+        logger.info("Synapse now listening on port %d", port)
+
+    def _configure_named_resource(self, name, compress=False):
+        """Build a resource map for a named resource
+
+        Args:
+            name (str): named resource: one of "client", "federation", etc
+            compress (bool): whether to enable gzip compression for this
+                resource
+
+        Returns:
+            dict[str, Resource]: map from path to HTTP resource
+        """
         resources = {}
         if name == "client":
             client_resource = ClientRestResource(self)
-            if res["compress"]:
+            if compress:
                 client_resource = gz_wrap(client_resource)

             resources.update({

@@ -132,7 +194,8 @@ class SynapseHomeServer(HomeServer):
             })

         if name in ["media", "federation", "client"]:
-            media_repo = MediaRepositoryResource(self)
+            if self.get_config().enable_media_repo:
+                media_repo = self.get_media_repository_resource()
                 resources.update({
                     MEDIA_PREFIX: media_repo,
                     LEGACY_MEDIA_PREFIX: media_repo,

@@ -140,6 +203,10 @@ class SynapseHomeServer(HomeServer):
                         self, self.config.uploads_path
                     ),
                 })
+            elif name == "media":
+                raise ConfigError(
+                    "'media' resource conflicts with enable_media_repo=False",
+                )

         if name in ["keys", "federation"]:
             resources.update({

@@ -153,39 +220,10 @@ class SynapseHomeServer(HomeServer):
         if name == "metrics" and self.get_config().enable_metrics:
             resources[METRICS_PREFIX] = MetricsResource(self)

-        if WEB_CLIENT_PREFIX in resources:
-            root_resource = RootRedirect(WEB_CLIENT_PREFIX)
-        else:
-            root_resource = Resource()
+        if name == "replication":
+            resources[REPLICATION_PREFIX] = ReplicationRestResource(self)

-        root_resource = create_resource_tree(resources, root_resource)
-
-        if tls:
-            for address in bind_addresses:
-                reactor.listenSSL(
-                    port,
-                    SynapseSite(
-                        "synapse.access.https.%s" % (site_tag,),
-                        site_tag,
-                        listener_config,
-                        root_resource,
-                    ),
-                    self.tls_server_context_factory,
-                    interface=address
-                )
-        else:
-            for address in bind_addresses:
-                reactor.listenTCP(
-                    port,
-                    SynapseSite(
-                        "synapse.access.http.%s" % (site_tag,),
-                        site_tag,
-                        listener_config,
-                        root_resource,
-                    ),
-                    interface=address
-                )
-        logger.info("Synapse now listening on port %d", port)
         return resources
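
Note: across these files the fallback root resource changes from Resource() to NoResource(). A bare Resource() has no render method, so a request for an unmatched path produces an ugly server error; NoResource() is twisted's canonical error resource and returns a clean 404 page instead. A one-line sketch of the presumed motivation:

    from twisted.web.resource import NoResource

    root_resource = NoResource()  # unknown paths now get a proper 404 response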

     def start_listening(self):
         config = self.get_config()

@@ -194,17 +232,14 @@ class SynapseHomeServer(HomeServer):
         if listener["type"] == "http":
             self._listener_http(config, listener)
         elif listener["type"] == "manhole":
-            bind_addresses = listener["bind_addresses"]
-
-            for address in bind_addresses:
-                reactor.listenTCP(
+            listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
                 ),
-                    interface=address
-                )
             )
         elif listener["type"] == "replication":
             bind_addresses = listener["bind_addresses"]

@@ -235,29 +270,6 @@ class SynapseHomeServer(HomeServer):
         except IncorrectDatabaseSetup as e:
             quit_with_error(e.message)

-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
-
-def quit_with_error(error_string):
-    message_lines = error_string.split("\n")
-    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
-    sys.stderr.write("*" * line_length + '\n')
-    for line in message_lines:
-        sys.stderr.write(" %s\n" % (line.rstrip(),))
-    sys.stderr.write("*" * line_length + '\n')
-    sys.exit(1)
-

 def setup(config_options):
     """

@@ -336,7 +348,7 @@ def setup(config_options):
         hs.get_state_handler().start_caching()
         hs.get_datastore().start_profiling()
         hs.get_datastore().start_doing_background_updates()
-        hs.get_replication_layer().start_get_pdu_cache()
+        hs.get_federation_client().start_get_pdu_cache()

         register_memory_metrics(hs)


@@ -35,7 +35,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.slave.storage.transactions import TransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.media.v0.content_repository import ContentRepoResource
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.storage.media_repository import MediaRepositoryStore

@@ -44,7 +43,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.media_repository")


@@ -61,19 +60,6 @@ class MediaRepositorySlavedStore(


 class MediaRepositoryServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)

@@ -89,7 +75,7 @@ class MediaRepositoryServer(HomeServer):
         if name == "metrics":
             resources[METRICS_PREFIX] = MetricsResource(self)
         elif name == "media":
-            media_repo = MediaRepositoryResource(self)
+            media_repo = self.get_media_repository_resource()
             resources.update({
                 MEDIA_PREFIX: media_repo,
                 LEGACY_MEDIA_PREFIX: media_repo,

@@ -98,18 +84,17 @@ class MediaRepositoryServer(HomeServer):
                 ),
             })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
             ),
-                interface=address
-            )
+        )

         logger.info("Synapse media repository now listening on port %d", port)

@@ -119,17 +104,14 @@ class MediaRepositoryServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            bind_addresses = listener["bind_addresses"]
-
-            for address in bind_addresses:
-                reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
                 ),
-                    interface=address
-                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -151,6 +133,13 @@ def start(config_options):

     assert config.worker_app == "synapse.app.media_repository"

+    if config.enable_media_repo:
+        _base.quit_with_error(
+            "enable_media_repo must be disabled in the main synapse process\n"
+            "before the media repo can be run in a separate worker.\n"
+            "Please add ``enable_media_repo: false`` to the main config\n"
+        )
+
     setup_logging(config, use_worker_options=True)

     events.USE_FROZEN_DICTS = config.use_frozen_dicts

@@ -169,7 +158,6 @@ def start(config_options):
     )

     ss.setup()
-    ss.get_handlers()
     ss.start_listening(config.worker_listeners)

     def start():

@@ -32,13 +32,12 @@ from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.engines import create_engine
-from synapse.storage.roommember import RoomMemberStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.pusher")

@@ -75,25 +74,8 @@ class PusherSlaveStore(
         DataStore.get_profile_displayname.__func__
     )

-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
-    )
-

 class PusherServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = PusherSlaveStore(self.get_db_conn(), self)

@@ -112,18 +94,17 @@ class PusherServer(HomeServer):
         if name == "metrics":
             resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
             ),
-                interface=address
-            )
+        )

         logger.info("Synapse pusher now listening on port %d", port)

@@ -133,17 +114,14 @@ class PusherServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            bind_addresses = listener["bind_addresses"]
-
-            for address in bind_addresses:
-                reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
                 ),
-                    interface=address
-                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -40,6 +40,7 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
 from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.rest.client.v1 import events
 from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet

@@ -55,30 +56,27 @@ from synapse.util.manhole import manhole
 from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.synchrotron")


 class SynchrotronSlavedStore(
-    SlavedPushRuleStore,
-    SlavedEventStore,
     SlavedReceiptsStore,
     SlavedAccountDataStore,
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedFilteringStore,
     SlavedPresenceStore,
+    SlavedGroupServerStore,
     SlavedDeviceInboxStore,
     SlavedDeviceStore,
+    SlavedPushRuleStore,
+    SlavedEventStore,
     SlavedClientIpStore,
     RoomStore,
     BaseSlavedStore,
 ):
-    who_forgot_in_room = (
-        RoomMemberStore.__dict__["who_forgot_in_room"]
-    )
-
     did_forget = (
         RoomMemberStore.__dict__["did_forget"]
     )

@@ -244,19 +242,6 @@ class SynchrotronApplicationService(object):


 class SynchrotronServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)

@@ -284,18 +269,17 @@ class SynchrotronServer(HomeServer):
             "/_matrix/client/api/v1": resource,
         })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
             ),
-                interface=address
-            )
+        )

         logger.info("Synapse synchrotron now listening on port %d", port)

@@ -305,17 +289,14 @@ class SynchrotronServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            bind_addresses = listener["bind_addresses"]
-
-            for address in bind_addresses:
-                reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
                 ),
-                    interface=address
-                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -338,11 +319,10 @@ class SyncReplicationHandler(ReplicationClientHandler):

         self.store = hs.get_datastore()
         self.typing_handler = hs.get_typing_handler()
+        # NB this is a SynchrotronPresence, not a normal PresenceHandler
         self.presence_handler = hs.get_presence_handler()
         self.notifier = hs.get_notifier()

-        self.presence_handler.sync_callback = self.send_user_sync
-
     def on_rdata(self, stream_name, token, rows):
         super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)

@@ -403,6 +383,10 @@ class SyncReplicationHandler(ReplicationClientHandler):
             )
         elif stream_name == "presence":
             yield self.presence_handler.process_replication_rows(token, rows)
+        elif stream_name == "receipts":
+            self.notifier.on_new_event(
+                "groups_key", token, users=[row.user_id for row in rows],
+            )


 def start(config_options):

@@ -184,6 +184,9 @@ def main():
         worker_configfiles.append(worker_configfile)

     if options.all_processes:
+        # To start the main synapse with -a you need to add a worker file
+        # with worker_app == "synapse.app.homeserver"
+        start_stop_synapse = False
         worker_configdir = options.all_processes
         if not os.path.isdir(worker_configdir):
             write(

@@ -200,6 +203,24 @@ def main():
         with open(worker_configfile) as stream:
             worker_config = yaml.load(stream)
         worker_app = worker_config["worker_app"]
+        if worker_app == "synapse.app.homeserver":
+            # We need to special case all of this to pick up options that may
+            # be set in the main config file or in this worker config file.
+            worker_pidfile = (
+                worker_config.get("pid_file")
+                or pidfile
+            )
+            worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
+            daemonize = worker_config.get("daemonize") or config.get("daemonize")
+            assert daemonize, "Main process must have daemonize set to true"
+
+            # The master process doesn't support using worker_* config.
+            for key in worker_config:
+                if key == "worker_app":  # But we allow worker_app
+                    continue
+                assert not key.startswith("worker_"), \
+                    "Main process cannot use worker_* config"
+        else:
+            worker_pidfile = worker_config["worker_pid_file"]
+            worker_daemonize = worker_config["worker_daemonize"]
+            assert worker_daemonize, "In config %r: expected '%s' to be True" % (

@@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.user_dir")

@@ -92,19 +92,6 @@ class UserDirectorySlaveStore(


 class UserDirectoryServer(HomeServer):
-    def get_db_conn(self, run_new_connection=True):
-        # Any param beginning with cp_ is a parameter for adbapi, and should
-        # not be passed to the database engine.
-        db_params = {
-            k: v for k, v in self.db_config.get("args", {}).items()
-            if not k.startswith("cp_")
-        }
-        db_conn = self.database_engine.module.connect(**db_params)
-
-        if run_new_connection:
-            self.database_engine.on_new_connection(db_conn)
-        return db_conn
-
     def setup(self):
         logger.info("Setting up.")
         self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)

@@ -129,18 +116,17 @@ class UserDirectoryServer(HomeServer):
             "/_matrix/client/api/v1": resource,
         })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

-        for address in bind_addresses:
-            reactor.listenTCP(
+        _base.listen_tcp(
+            bind_addresses,
             port,
             SynapseSite(
                 "synapse.access.http.%s" % (site_tag,),
                 site_tag,
                 listener_config,
                 root_resource,
             ),
-                interface=address
-            )
+        )

         logger.info("Synapse user_dir now listening on port %d", port)

@@ -150,17 +136,14 @@ class UserDirectoryServer(HomeServer):
         if listener["type"] == "http":
             self._listen_http(listener)
         elif listener["type"] == "manhole":
-            bind_addresses = listener["bind_addresses"]
-
-            for address in bind_addresses:
-                reactor.listenTCP(
+            _base.listen_tcp(
+                listener["bind_addresses"],
                 listener["port"],
                 manhole(
                     username="matrix",
                     password="rabbithole",
                     globals={"hs": self},
                 ),
-                    interface=address
-                )
             )
         else:
             logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -14,6 +14,7 @@
 # limitations under the License.
 from synapse.api.constants import EventTypes
 from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.types import GroupID, get_domain_from_id

 from twisted.internet import defer

@@ -81,12 +82,13 @@ class ApplicationService(object):
     # values.
     NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

-    def __init__(self, token, url=None, namespaces=None, hs_token=None,
+    def __init__(self, token, hostname, url=None, namespaces=None, hs_token=None,
                  sender=None, id=None, protocols=None, rate_limited=True):
         self.token = token
         self.url = url
         self.hs_token = hs_token
         self.sender = sender
+        self.server_name = hostname
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id

@@ -125,6 +127,24 @@ class ApplicationService(object):
                 raise ValueError(
                     "Expected bool for 'exclusive' in ns '%s'" % ns
                 )
+            group_id = regex_obj.get("group_id")
+            if group_id:
+                if not isinstance(group_id, str):
+                    raise ValueError(
+                        "Expected string for 'group_id' in ns '%s'" % ns
+                    )
+                try:
+                    GroupID.from_string(group_id)
+                except Exception:
+                    raise ValueError(
+                        "Expected valid group ID for 'group_id' in ns '%s'" % ns
+                    )
+
+                if get_domain_from_id(group_id) != self.server_name:
+                    raise ValueError(
+                        "Expected 'group_id' to be this host in ns '%s'" % ns
+                    )
+
             regex = regex_obj.get("regex")
             if isinstance(regex, basestring):
                 regex_obj["regex"] = re.compile(regex)  # Pre-compile regex

@@ -251,6 +271,21 @@ class ApplicationService(object):
             if regex_obj["exclusive"]
         ]

+    def get_groups_for_user(self, user_id):
+        """Get the groups that this user is associated with by this AS
+
+        Args:
+            user_id (str): The ID of the user.
+
+        Returns:
+            iterable[str]: an iterable that yields group_id strings.
+        """
+        return (
+            regex_obj["group_id"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if "group_id" in regex_obj and regex_obj["regex"].match(user_id)
+        )
+
     def is_rate_limited(self):
         return self.rate_limited
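
Note: a quick usage sketch of the new group_id namespace option and get_groups_for_user. The AS registration data below is illustrative, not from this diff:

    import re

    namespaces = {
        "users": [{
            "regex": re.compile("@irc_.*:example.com"),
            "exclusive": True,
            "group_id": "+irc:example.com",
        }],
        "aliases": [],
        "rooms": [],
    }
    # With these namespaces, get_groups_for_user("@irc_alice:example.com")
    # would yield "+irc:example.com", letting the AS flair its users with
    # that group; non-matching user IDs yield nothing.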

@@ -18,6 +18,7 @@ from synapse.api.constants import ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException
 from synapse.http.client import SimpleHttpClient
 from synapse.events.utils import serialize_event
+from synapse.util.logcontext import preserve_fn, make_deferred_yieldable
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.types import ThirdPartyInstanceID

@@ -192,9 +193,12 @@ class ApplicationServiceApi(SimpleHttpClient):
             defer.returnValue(None)

         key = (service.id, protocol)
-        return self.protocol_meta_cache.get(key) or (
-            self.protocol_meta_cache.set(key, _get())
-        )
+        result = self.protocol_meta_cache.get(key)
+        if not result:
+            result = self.protocol_meta_cache.set(
+                key, preserve_fn(_get)()
+            )
+        return make_deferred_yieldable(result)

     @defer.inlineCallbacks
     def push_bulk(self, service, events, txn_id=None):

@@ -123,7 +123,7 @@ class _ServiceQueuer(object):
         with Measure(self.clock, "servicequeuer.send"):
             try:
                 yield self.txn_ctrl.send(service, events)
-            except:
+            except Exception:
                 logger.exception("AS request failed")
             finally:
                 self.requests_in_flight.discard(service.id)
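
Note: the protocol_meta_cache change above follows synapse's usual ResponseCache idiom. A minimal sketch of the pattern, assuming the era's get/set semantics (get returns an in-flight deferred or None; set stores one and returns it):

    from synapse.util.logcontext import preserve_fn, make_deferred_yieldable

    def cached_fetch(cache, key, fetch):
        result = cache.get(key)
        if not result:
            # preserve_fn stops the current log context leaking into the
            # shared fetch; make_deferred_yieldable restores it for the
            # caller when the deferred is awaited.
            result = cache.set(key, preserve_fn(fetch)())
        return make_deferred_yieldable(result)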

@@ -81,22 +81,38 @@ class Config(object):
     def abspath(file_path):
         return os.path.abspath(file_path) if file_path else file_path

+    @classmethod
+    def path_exists(cls, file_path):
+        """Check if a file exists
+
+        Unlike os.path.exists, this throws an exception if there is an error
+        checking if the file exists (for example, if there is a perms error on
+        the parent dir).
+
+        Returns:
+            bool: True if the file exists; False if not.
+        """
+        try:
+            os.stat(file_path)
+            return True
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise e
+            return False
+
     @classmethod
     def check_file(cls, file_path, config_name):
         if file_path is None:
             raise ConfigError(
                 "Missing config for %s."
                 " You must specify a path for the config file. You can "
                 "do this with the -c or --config-path option. "
                 "Adding --generate-config along with --server-name "
                 "<server name> will generate a config file at the given path."
                 % (config_name,)
             )
-        if not os.path.exists(file_path):
+        try:
+            os.stat(file_path)
+        except OSError as e:
             raise ConfigError(
-                "File %s config for %s doesn't exist."
-                " Try running again with --generate-config"
-                % (file_path, config_name,)
+                "Error accessing file '%s' (config for %s): %s"
+                % (file_path, config_name, e.strerror)
             )
         return cls.abspath(file_path)
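
Note: the point of path_exists over os.path.exists is that it only swallows ENOENT and surfaces everything else. A small self-contained demonstration of the distinction:

    import errno
    import os

    def path_exists(file_path):
        try:
            os.stat(file_path)
            return True
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise  # e.g. EACCES on the parent dir is a real error
            return False

    # os.path.exists("/root/forbidden/key") quietly returns False on a
    # permissions error; path_exists raises instead, so misconfiguration
    # fails loudly rather than triggering spurious key regeneration.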

@@ -248,7 +264,7 @@ class Config(object):
                 " -c CONFIG-FILE\""
             )
         (config_path,) = config_files
-        if not os.path.exists(config_path):
+        if not cls.path_exists(config_path):
             if config_args.keys_directory:
                 config_dir_path = config_args.keys_directory
             else:

@@ -261,7 +277,7 @@ class Config(object):
                     "Must specify a server_name to a generate config for."
                     " Pass -H server.name."
                 )
-            if not os.path.exists(config_dir_path):
+            if not cls.path_exists(config_dir_path):
                 os.makedirs(config_dir_path)
             with open(config_path, "wb") as config_file:
                 config_bytes, config = obj.generate_config(

@@ -154,6 +154,7 @@ def _load_appservice(hostname, as_info, config_filename):
     )
     return ApplicationService(
         token=as_info["as_token"],
+        hostname=hostname,
         url=as_info["url"],
         namespaces=as_info["namespaces"],
         hs_token=as_info["hs_token"],

@@ -41,7 +41,7 @@ class CasConfig(Config):
        #cas_config:
        #   enabled: true
        #   server_url: "https://cas-server.com"
-       #   service_url: "https://homesever.domain.com:8448"
+       #   service_url: "https://homeserver.domain.com:8448"
        #   #required_attributes:
        #   #    name: value
        """

synapse/config/groups.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class GroupsConfig(Config):
+    def read_config(self, config):
+        self.enable_group_creation = config.get("enable_group_creation", False)
+        self.group_creation_prefix = config.get("group_creation_prefix", "")
+
+    def default_config(self, **kwargs):
+        return """\
+        # Whether to allow non server admins to create groups on this server
+        enable_group_creation: false
+
+        # If enabled, non server admins can only create groups with local parts
+        # starting with this prefix
+        # group_creation_prefix: "unofficial/"
+        """

@@ -34,6 +34,9 @@ from .password_auth_providers import PasswordAuthProviderConfig
 from .emailconfig import EmailConfig
 from .workers import WorkerConfig
 from .push import PushConfig
+from .spam_checker import SpamCheckerConfig
+from .groups import GroupsConfig
+from .user_directory import UserDirectoryConfig


 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,

@@ -41,7 +44,8 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                        JWTConfig, PasswordConfig, EmailConfig,
-                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,):
+                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
+                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,):
     pass

@@ -118,10 +118,9 @@ class KeyConfig(Config):
         signing_keys = self.read_file(signing_key_path, "signing_key")
         try:
             return read_signing_keys(signing_keys.splitlines(True))
-        except Exception:
+        except Exception as e:
             raise ConfigError(
-                "Error reading signing_key."
-                " Try running again with --generate-config"
+                "Error reading signing_key: %s" % (str(e))
             )

     def read_old_signing_keys(self, old_signing_keys):

@@ -141,7 +140,8 @@ class KeyConfig(Config):

     def generate_files(self, config):
         signing_key_path = config["signing_key_path"]
-        if not os.path.exists(signing_key_path):
+
+        if not self.path_exists(signing_key_path):
             with open(signing_key_path, "w") as signing_key_file:
                 key_id = "a_" + random_string(4)
                 write_signing_keys(

@@ -29,8 +29,8 @@ version: 1

 formatters:
     precise:
-        format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s\
-            - %(message)s'
+        format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
+            %(request)s - %(message)s'

 filters:
     context:

@@ -74,17 +74,10 @@ class LoggingConfig(Config):
         self.log_file = self.abspath(config.get("log_file"))

     def default_config(self, config_dir_path, server_name, **kwargs):
-        log_file = self.abspath("homeserver.log")
         log_config = self.abspath(
             os.path.join(config_dir_path, server_name + ".log.config")
         )
         return """
-        # Logging verbosity level. Ignored if log_config is specified.
-        verbose: 0
-
-        # File to write logging to. Ignored if log_config is specified.
-        log_file: "%(log_file)s"
-
         # A yaml python logging config file
         log_config: "%(log_config)s"
         """ % locals()

@@ -123,9 +116,10 @@ class LoggingConfig(Config):
     def generate_files(self, config):
         log_config = config.get("log_config")
         if log_config and not os.path.exists(log_config):
+            log_file = self.abspath("homeserver.log")
             with open(log_config, "wb") as log_config_file:
                 log_config_file.write(
-                    DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
+                    DEFAULT_LOG_CONFIG.substitute(log_file=log_file)
                 )

@@ -148,8 +142,11 @@ def setup_logging(config, use_worker_options=False):
         "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
         " - %(message)s"
     )
-    if log_config is None:

+    if log_config is None:
+        # We don't have a logfile, so fall back to the 'verbosity' param from
+        # the config or cmdline. (Note that we generate a log config for new
+        # installs, so this will be an unusual case)
         level = logging.INFO
         level_for_storage = logging.INFO
         if config.verbosity:

@@ -157,11 +154,10 @@ def setup_logging(config, use_worker_options=False):
         if config.verbosity > 1:
             level_for_storage = logging.DEBUG

     # FIXME: we need a logging.WARN for a -q quiet option
     logger = logging.getLogger('')
     logger.setLevel(level)
-
-    logging.getLogger('synapse.storage').setLevel(level_for_storage)
+    logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

     formatter = logging.Formatter(log_format)
     if log_file:

@@ -176,6 +172,10 @@ def setup_logging(config, use_worker_options=False):
             logger.info("Opened new log file due to SIGHUP")
     else:
         handler = logging.StreamHandler()
+
+        def sighup(signum, stack):
+            pass
+
     handler.setFormatter(formatter)

     handler.addFilter(LoggingContextFilter(request=""))

@@ -13,44 +13,41 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ._base import Config, ConfigError
+from ._base import Config

-import importlib
+from synapse.util.module_loader import load_module

+LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider'


 class PasswordAuthProviderConfig(Config):
     def read_config(self, config):
         self.password_providers = []
+        providers = []

         # We want to be backwards compatible with the old `ldap_config`
         # param.
         ldap_config = config.get("ldap_config", {})
-        self.ldap_enabled = ldap_config.get("enabled", False)
-        if self.ldap_enabled:
-            from ldap_auth_provider import LdapAuthProvider
-            parsed_config = LdapAuthProvider.parse_config(ldap_config)
-            self.password_providers.append((LdapAuthProvider, parsed_config))
+        if ldap_config.get("enabled", False):
+            providers.append({
+                'module': LDAP_PROVIDER,
+                'config': ldap_config,
+            })

-        providers = config.get("password_providers", [])
+        providers.extend(config.get("password_providers", []))
         for provider in providers:
+            mod_name = provider['module']

             # This is for backwards compat when the ldap auth provider resided
             # in this package.
-            if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider":
-                from ldap_auth_provider import LdapAuthProvider
-                provider_class = LdapAuthProvider
-            else:
-                # We need to import the module, and then pick the class out of
-                # that, so we split based on the last dot.
-                module, clz = provider['module'].rsplit(".", 1)
-                module = importlib.import_module(module)
-                provider_class = getattr(module, clz)
+            if mod_name == "synapse.util.ldap_auth_provider.LdapAuthProvider":
+                mod_name = LDAP_PROVIDER
+
+            (provider_class, provider_config) = load_module({
+                "module": mod_name,
+                "config": provider['config'],
+            })

-            try:
-                provider_config = provider_class.parse_config(provider["config"])
-            except Exception as e:
-                raise ConfigError(
-                    "Failed to parse config for %r: %r" % (provider['module'], e)
-                )
             self.password_providers.append((provider_class, provider_config))

     def default_config(self, **kwargs):
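
Note: load_module replaces the open-coded importlib logic above, and is reused by the spam-checker and media-storage-provider configs later in this changeset. A minimal sketch of what synapse.util.module_loader.load_module presumably looks like (its definition is not shown in this compare view):

    import importlib

    def load_module(provider):
        """Load a {"module": "pkg.Class", "config": {...}} config dict.

        Returns the class and its parsed config.
        """
        # Split the dotted path so the final component is the class name.
        module_name, clz = provider['module'].rsplit(".", 1)
        module = importlib.import_module(module_name)
        provider_class = getattr(module, clz)
        provider_config = provider_class.parse_config(provider["config"])
        return provider_class, provider_config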

@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -18,28 +19,43 @@ from ._base import Config

 class PushConfig(Config):
     def read_config(self, config):
-        self.push_redact_content = False
+        push_config = config.get("push", {})
+        self.push_include_content = push_config.get("include_content", True)

+        # There was a a 'redact_content' setting but mistakenly read from the
+        # 'email'section'. Check for the flag in the 'push' section, and log,
+        # but do not honour it to avoid nasty surprises when people upgrade.
+        if push_config.get("redact_content") is not None:
+            print(
+                "The push.redact_content content option has never worked. "
+                "Please set push.include_content if you want this behaviour"
+            )

+        # Now check for the one in the 'email' section and honour it,
+        # with a warning.
         push_config = config.get("email", {})
-        self.push_redact_content = push_config.get("redact_content", False)
+        redact_content = push_config.get("redact_content")
+        if redact_content is not None:
+            print(
+                "The 'email.redact_content' option is deprecated: "
+                "please set push.include_content instead"
+            )
+            self.push_include_content = not redact_content

     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
-        # Control how push messages are sent to google/apple to notifications.
-        # Normally every message said in a room with one or more people using
-        # mobile devices will be posted to a push server hosted by matrix.org
-        # which is registered with google and apple in order to allow push
-        # notifications to be sent to these mobile devices.
-        #
-        # Setting redact_content to true will make the push messages contain no
-        # message content which will provide increased privacy. This is a
-        # temporary solution pending improvements to Android and iPhone apps
-        # to get content from the app rather than the notification.
-        #
+        # Clients requesting push notifications can either have the body of
+        # the message sent in the notification poke along with other details
+        # like the sender, or just the event ID and room ID (`event_id_only`).
+        # If clients choose the former, this option controls whether the
+        # notification request includes the content of the event (other details
+        # like the sender are still included). For `event_id_only` push, it
+        # has no effect.
+
        # For modern android devices the notification content will still appear
        # because it is loaded by the app. iPhone, however will send a
        # notification saying only that a message arrived and who it came from.
        #
        #push:
-       #   redact_content: false
+       #   include_content: true
        """

@@ -31,6 +31,8 @@ class RegistrationConfig(Config):
             strtobool(str(config["disable_registration"]))
         )

+        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
+        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
         self.registration_shared_secret = config.get("registration_shared_secret")

         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)

@@ -41,6 +43,8 @@ class RegistrationConfig(Config):
             self.allow_guest_access and config.get("invite_3pid_guest", False)
         )

+        self.auto_join_rooms = config.get("auto_join_rooms", [])
+
     def default_config(self, **kwargs):
         registration_shared_secret = random_string_with_symbols(50)

@@ -50,13 +54,32 @@ class RegistrationConfig(Config):
        # Enable registration for new users.
        enable_registration: False

+       # The user must provide all of the below types of 3PID when registering.
+       #
+       # registrations_require_3pid:
+       #     - email
+       #     - msisdn
+
+       # Mandate that users are only allowed to associate certain formats of
+       # 3PIDs with accounts on this server.
+       #
+       # allowed_local_3pids:
+       #     - medium: email
+       #       pattern: ".*@matrix\\.org"
+       #     - medium: email
+       #       pattern: ".*@vector\\.im"
+       #     - medium: msisdn
+       #       pattern: "\\+44"

        # If set, allows registration by anyone who also has the shared
        # secret, even if registration is otherwise disabled.
        registration_shared_secret: "%(registration_shared_secret)s"

        # Set the number of bcrypt rounds used to generate password hash.
        # Larger numbers increase the work factor needed to generate the hash.
-       # The default number of rounds is 12.
+       # The default number is 12 (which equates to 2^12 rounds).
+       # N.B. that increasing this will exponentially increase the time required
+       # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
        bcrypt_rounds: 12

        # Allows users to register as guests without a password/email/etc, and

@@ -70,6 +93,11 @@ class RegistrationConfig(Config):
            - matrix.org
            - vector.im
            - riot.im

+       # Users who register on this homeserver will automatically be joined
+       # to these rooms
+       #auto_join_rooms:
+       #    - "#example:example.com"
        """ % locals()

     def add_arguments(self, parser):

@@ -16,6 +16,8 @@
 from ._base import Config, ConfigError
 from collections import namedtuple

+from synapse.util.module_loader import load_module
+

 MISSING_NETADDR = (
     "Missing netaddr library. This is required for URL preview API."

@@ -36,6 +38,14 @@ ThumbnailRequirement = namedtuple(
     "ThumbnailRequirement", ["width", "height", "method", "media_type"]
 )

+MediaStorageProviderConfig = namedtuple(
+    "MediaStorageProviderConfig", (
+        "store_local",  # Whether to store newly uploaded local files
+        "store_remote",  # Whether to store newly downloaded remote files
+        "store_synchronous",  # Whether to wait for successful storage for local uploads
+    ),
+)
+

 def parse_thumbnail_requirements(thumbnail_sizes):
     """ Takes a list of dictionaries with "width", "height", and "method" keys

@@ -70,7 +80,64 @@ class ContentRepositoryConfig(Config):
         self.max_upload_size = self.parse_size(config["max_upload_size"])
         self.max_image_pixels = self.parse_size(config["max_image_pixels"])
         self.max_spider_size = self.parse_size(config["max_spider_size"])

         self.media_store_path = self.ensure_directory(config["media_store_path"])

+        backup_media_store_path = config.get("backup_media_store_path")
+
+        synchronous_backup_media_store = config.get(
+            "synchronous_backup_media_store", False
+        )
+
+        storage_providers = config.get("media_storage_providers", [])
+
+        if backup_media_store_path:
+            if storage_providers:
+                raise ConfigError(
+                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
+                )
+
+            storage_providers = [{
+                "module": "file_system",
+                "store_local": True,
+                "store_synchronous": synchronous_backup_media_store,
+                "store_remote": True,
+                "config": {
+                    "directory": backup_media_store_path,
+                }
+            }]
+
+        # This is a list of config that can be used to create the storage
+        # providers. The entries are tuples of (Class, class_config,
+        # MediaStorageProviderConfig), where Class is the class of the provider,
+        # the class_config the config to pass to it, and
+        # MediaStorageProviderConfig are options for StorageProviderWrapper.
+        #
+        # We don't create the storage providers here as not all workers need
+        # them to be started.
+        self.media_storage_providers = []
+
+        for provider_config in storage_providers:
+            # We special case the module "file_system" so as not to need to
+            # expose FileStorageProviderBackend
+            if provider_config["module"] == "file_system":
+                provider_config["module"] = (
+                    "synapse.rest.media.v1.storage_provider"
+                    ".FileStorageProviderBackend"
+                )
+
+            provider_class, parsed_config = load_module(provider_config)
+
+            wrapper_config = MediaStorageProviderConfig(
+                provider_config.get("store_local", False),
+                provider_config.get("store_remote", False),
+                provider_config.get("store_synchronous", False),
+            )
+
+            self.media_storage_providers.append(
+                (provider_class, parsed_config, wrapper_config,)
+            )
+
         self.uploads_path = self.ensure_directory(config["uploads_path"])
         self.dynamic_thumbnails = config["dynamic_thumbnails"]
         self.thumbnail_requirements = parse_thumbnail_requirements(

@@ -115,6 +182,20 @@ class ContentRepositoryConfig(Config):
        # Directory where uploaded images and attachments are stored.
        media_store_path: "%(media_store)s"

+       # Media storage providers allow media to be stored in different
+       # locations.
+       # media_storage_providers:
+       #   - module: file_system
+       #     # Whether to write new local files.
+       #     store_local: false
+       #     # Whether to write new remote media
+       #     store_remote: false
+       #     # Whether to block upload requests waiting for write to this
+       #     # provider to complete
+       #     store_synchronous: false
+       #     config:
+       #       directory: /mnt/some/other/directory

        # Directory where in-progress uploads are stored.
        uploads_path: "%(uploads_path)s"

@@ -41,6 +41,12 @@ class ServerConfig(Config):
         # false only if we are updating the user directory in a worker
         self.update_user_directory = config.get("update_user_directory", True)

+        # whether to enable the media repository endpoints. This should be set
+        # to false if the media repository is running as a separate endpoint;
+        # doing so ensures that we will not run cache cleanup jobs on the
+        # master, potentially causing inconsistency.
+        self.enable_media_repo = config.get("enable_media_repo", True)
+
         self.filter_timeline_limit = config.get("filter_timeline_limit", -1)

         # Whether we should block invites sent to users on this server

@@ -49,6 +55,17 @@ class ServerConfig(Config):
             "block_non_admin_invites", False,
         )

+        # FIXME: federation_domain_whitelist needs sytests
+        self.federation_domain_whitelist = None
+        federation_domain_whitelist = config.get(
+            "federation_domain_whitelist", None
+        )
+        # turn the whitelist into a hash for speed of lookup
+        if federation_domain_whitelist is not None:
+            self.federation_domain_whitelist = {}
+            for domain in federation_domain_whitelist:
+                self.federation_domain_whitelist[domain] = True
+
         if self.public_baseurl is not None:
             if self.public_baseurl[-1] != '/':
                 self.public_baseurl += '/'
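
Note: storing the whitelist as a dict rather than the list from the YAML makes the per-request membership check O(1); a set would serve equally well. A tiny illustrative sketch of the lookup this enables:

    federation_domain_whitelist = {
        "lon.example.com": True,
        "nyc.example.com": True,
    }

    def is_domain_allowed(domain, whitelist):
        # A whitelist of None means "no restriction".
        return whitelist is None or domain in whitelist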

@@ -204,6 +221,17 @@ class ServerConfig(Config):
        # (except those sent by local server admins). The default is False.
        # block_non_admin_invites: True

+       # Restrict federation to the following whitelist of domains.
+       # N.B. we recommend also firewalling your federation listener to limit
+       # inbound federation traffic as early as possible, rather than relying
+       # purely on this application-layer restriction. If not specified, the
+       # default is to whitelist everything.
+       #
+       # federation_domain_whitelist:
+       #  - lon.example.com
+       #  - nyc.example.com
+       #  - syd.example.com
+
        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        listeners:

@@ -214,13 +242,12 @@ class ServerConfig(Config):
          port: %(bind_port)s

          # Local addresses to listen on.
-         # This will listen on all IPv4 addresses by default.
+         # On Linux and Mac OS, `::` will listen on all IPv4 and IPv6
+         # addresses by default. For most other OSes, this will only listen
+         # on IPv6.
          bind_addresses:
+           - '::'
            - '0.0.0.0'
-           # Uncomment to listen on all IPv6 interfaces
-           # N.B: On at least Linux this will also listen on all IPv4
-           # addresses, so you will need to comment out the line above.
-           # - '::'

          # This is a 'http' listener, allows us to specify 'resources'.
          type: http

@@ -247,11 +274,18 @@ class ServerConfig(Config):
            - names: [federation]  # Federation APIs
              compress: false

+         # optional list of additional endpoints which can be loaded via
+         # dynamic modules
+         # additional_resources:
+         #   "/_matrix/my/custom/endpoint":
+         #     module: my_module.CustomRequestHandler
+         #     config: {}
+
         # Unsecure HTTP listener,
         # For when matrix traffic passes through loadbalancer that unwraps TLS.
         - port: %(unsecure_port)s
           tls: false
-          bind_addresses: ['0.0.0.0']
+          bind_addresses: ['::', '0.0.0.0']
           type: http

           x_forwarded: false

@@ -265,7 +299,7 @@ class ServerConfig(Config):
        # Turn on the twisted ssh manhole service on localhost on the given
        # port.
        # - port: 9000
-       #   bind_address: 127.0.0.1
+       #   bind_addresses: ['::1', '127.0.0.1']
        #   type: manhole
        """ % locals()

@@ -303,7 +337,7 @@ def read_gc_thresholds(thresholds):
         return (
             int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
         )
-    except:
+    except Exception:
         raise ConfigError(
             "Value of `gc_threshold` must be a list of three integers if set"
         )

synapse/config/spam_checker.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util.module_loader import load_module
+
+from ._base import Config
+
+
+class SpamCheckerConfig(Config):
+    def read_config(self, config):
+        self.spam_checker = None
+
+        provider = config.get("spam_checker", None)
+        if provider is not None:
+            self.spam_checker = load_module(provider)
+
+    def default_config(self, **kwargs):
+        return """\
+        # spam_checker:
+        #     module: "my_custom_project.SuperSpamChecker"
+        #     config:
+        #         example_option: 'things'
+        """
|
||||
@@ -96,7 +96,7 @@ class TlsConfig(Config):
        # certificates returned by this server match one of the fingerprints.
        #
        # Synapse automatically adds the fingerprint of its own certificate
        # to the list. So if federation traffic is handle directly by synapse
        # to the list. So if federation traffic is handled directly by synapse
        # then no modification to the list is required.
        #
        # If synapse is run behind a load balancer that handles the TLS then it

@@ -109,6 +109,12 @@ class TlsConfig(Config):
        # key. It may be necessary to publish the fingerprints of a new
        # certificate and wait until the "valid_until_ts" of the previous key
        # responses have passed before deploying it.
        #
        # You can calculate a fingerprint from a given TLS listener via:
        # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
        #   openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
        # or by checking matrix.org/federationtester/api/report?server_name=$host
        #
        tls_fingerprints: []
        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """ % locals()
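The openssl pipeline in that comment has a direct Python equivalent. A minimal standard-library sketch (the helper name is illustrative; it assumes the listener serves a certificate retrievable via a plain TLS handshake):

    import base64
    import hashlib
    import ssl

    def tls_fingerprint(host, port):
        # Fetch the server certificate, convert to DER, hash and base64-encode
        # without padding -- mirroring openssl | sha256 -binary | base64 | tr -d '='.
        pem = ssl.get_server_certificate((host, port))
        der = ssl.PEM_cert_to_DER_cert(pem)
        digest = hashlib.sha256(der).digest()
        return base64.b64encode(digest).decode("ascii").rstrip("=")

    # print(tls_fingerprint("example.com", 443))
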
@@ -126,7 +132,7 @@ class TlsConfig(Config):
        tls_private_key_path = config["tls_private_key_path"]
        tls_dh_params_path = config["tls_dh_params_path"]

        if not os.path.exists(tls_private_key_path):
        if not self.path_exists(tls_private_key_path):
            with open(tls_private_key_path, "w") as private_key_file:
                tls_private_key = crypto.PKey()
                tls_private_key.generate_key(crypto.TYPE_RSA, 2048)

@@ -141,7 +147,7 @@ class TlsConfig(Config):
                    crypto.FILETYPE_PEM, private_key_pem
                )

        if not os.path.exists(tls_certificate_path):
        if not self.path_exists(tls_certificate_path):
            with open(tls_certificate_path, "w") as certificate_file:
                cert = crypto.X509()
                subject = cert.get_subject()

@@ -159,7 +165,7 @@ class TlsConfig(Config):
            certificate_file.write(cert_pem)

        if not os.path.exists(tls_dh_params_path):
        if not self.path_exists(tls_dh_params_path):
            if GENERATE_DH_PARAMS:
                subprocess.check_call([
                    "openssl", "dhparam",

synapse/config/user_directory.py (new file, 44 lines)
@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class UserDirectoryConfig(Config):
    """User Directory Configuration

    Configuration for the behaviour of the /user_directory API
    """

    def read_config(self, config):
        self.user_directory_search_all_users = False
        user_directory_config = config.get("user_directory", None)
        if user_directory_config:
            self.user_directory_search_all_users = (
                user_directory_config.get("search_all_users", False)
            )

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # User Directory configuration
        #
        # 'search_all_users' defines whether to search all users visible to your HS
        # when searching the user directory, rather than limiting to users visible
        # in public rooms. Defaults to false. If you set it True, you'll have to run
        #   UPDATE user_directory_stream_pos SET stream_id = NULL;
        # on your database to tell it to rebuild the user_directory search indexes.
        #
        #user_directory:
        #   search_all_users: false
        """
@@ -23,13 +23,26 @@ class WorkerConfig(Config):

    def read_config(self, config):
        self.worker_app = config.get("worker_app")

        # Canonicalise worker_app so that master always has None
        if self.worker_app == "synapse.app.homeserver":
            self.worker_app = None

        self.worker_listeners = config.get("worker_listeners")
        self.worker_daemonize = config.get("worker_daemonize")
        self.worker_pid_file = config.get("worker_pid_file")
        self.worker_log_file = config.get("worker_log_file")
        self.worker_log_config = config.get("worker_log_config")

        # The host used to connect to the main synapse
        self.worker_replication_host = config.get("worker_replication_host", None)

        # The port on the main synapse for TCP replication
        self.worker_replication_port = config.get("worker_replication_port", None)

        # The port on the main synapse for HTTP replication endpoint
        self.worker_replication_http_port = config.get("worker_replication_http_port")

        self.worker_name = config.get("worker_name", self.worker_app)

        self.worker_main_http_uri = config.get("worker_main_http_uri", None)

@@ -34,7 +34,7 @@ class ServerContextFactory(ssl.ContextFactory):
        try:
            _ecCurve = _OpenSSLECCurve(_defaultCurveName)
            _ecCurve.addECKeyToContext(context)
        except:
        except Exception:
            logger.exception("Failed to enable elliptic curve for TLS")
        context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
        context.use_certificate_chain_file(config.tls_certificate_file)

@@ -32,18 +32,25 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
    """Check whether the hash for this PDU matches the contents"""
    name, expected_hash = compute_content_hash(event, hash_algorithm)
    logger.debug("Expecting hash: %s", encode_base64(expected_hash))
    if name not in event.hashes:

    # some malformed events lack a 'hashes'. Protect against it being missing
    # or a weird type by basically treating it the same as an unhashed event.
    hashes = event.get("hashes")
    if not isinstance(hashes, dict):
        raise SynapseError(400, "Malformed 'hashes'", Codes.UNAUTHORIZED)

    if name not in hashes:
        raise SynapseError(
            400,
            "Algorithm %s not in hashes %s" % (
                name, list(event.hashes),
                name, list(hashes),
            ),
            Codes.UNAUTHORIZED,
        )
    message_hash_base64 = event.hashes[name]
    message_hash_base64 = hashes[name]
    try:
        message_hash_bytes = decode_base64(message_hash_base64)
    except:
    except Exception:
        raise SynapseError(
            400,
            "Invalid base64: %s" % (message_hash_base64,),

@@ -759,7 +759,7 @@ def _handle_key_deferred(verify_request):
    ))
    try:
        verify_signed_json(json_object, server_name, verify_key)
    except:
    except Exception:
        raise SynapseError(
            401,
            "Invalid signature for server %s with key %s:%s" % (

@@ -319,7 +319,7 @@ def _is_membership_change_allowed(event, auth_events):
        # TODO (erikj): Implement kicks.
        if target_banned and user_level < ban_level:
            raise AuthError(
                403, "You cannot unban user &s." % (target_user_id,)
                403, "You cannot unban user %s." % (target_user_id,)
            )
        elif target_user_id != event.user_id:
            kick_level = _get_named_level(auth_events, "kick", 50)

@@ -443,12 +443,12 @@ def _check_power_levels(event, auth_events):
    for k, v in user_list.items():
        try:
            UserID.from_string(k)
        except:
        except Exception:
            raise SynapseError(400, "Not a valid user_id: %s" % (k,))

        try:
            int(v)
        except:
        except Exception:
            raise SynapseError(400, "Not a valid power level: %s" % (v,))

    key = (event.type, event.state_key, )

@@ -470,14 +470,14 @@ def _check_power_levels(event, auth_events):
        ("invite", None),
    ]

    old_list = current_state.content.get("users")
    old_list = current_state.content.get("users", {})
    for user in set(old_list.keys() + user_list.keys()):
        levels_to_check.append(
            (user, "users")
        )

    old_list = current_state.content.get("events")
    new_list = event.content.get("events")
    old_list = current_state.content.get("events", {})
    new_list = event.content.get("events", {})
    for ev_id in set(old_list.keys() + new_list.keys()):
        levels_to_check.append(
            (ev_id, "events")

@@ -676,3 +676,32 @@ def auth_types_for_event(event):
        auth_types.append(key)

    return auth_types


def filter_dependent_state(keys, state):
    if (EventTypes.Create, "") in keys:
        return state

    if (EventTypes.PowerLevels, "") in keys:
        return state

    def _filter_state(entry):
        etype, state_key, sender = entry

        if (etype, state_key) in keys:
            return True

        if (EventTypes.Member, sender) in keys:
            return True

        if etype == EventTypes.Member:
            if (EventTypes.JoinRules, "") in keys:
                return True

            for t, _ in keys:
                if t == EventTypes.ThirdPartyInvite:
                    return True

        return False

    return filter(_filter_state, state)

@@ -55,7 +55,7 @@ class EventBuilderFactory(object):
        local_part = str(int(self.clock.time())) + i + random_string(5)

        e_id = EventID.create(local_part, self.hostname)
        e_id = EventID(local_part, self.hostname)

        return e_id.to_string()

@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from frozendict import frozendict


class EventContext(object):
    """

@@ -25,7 +29,9 @@ class EventContext(object):
        The current state map excluding the current event.
        (type, state_key) -> event_id

        state_group (int): state group id
        state_group (int|None): state group id, if the state has been stored
            as a state group. This is usually only None if e.g. the event is
            an outlier.
        rejected (bool|str): A rejection reason if the event was rejected, else
            False

@@ -46,7 +52,6 @@ class EventContext(object):
        "prev_state_ids",
        "state_group",
        "rejected",
        "push_actions",
        "prev_group",
        "delta_ids",
        "prev_state_events",

@@ -61,7 +66,6 @@ class EventContext(object):
        self.state_group = None

        self.rejected = False
        self.push_actions = []

        # A previously persisted state group and a delta between that
        # and this state.

@@ -71,3 +75,98 @@ class EventContext(object):
        self.prev_state_events = None

        self.app_service = None

    def serialize(self, event):
        """Converts self to a type that can be serialized as JSON, and then
        deserialized by `deserialize`

        Args:
            event (FrozenEvent): The event that this context relates to

        Returns:
            dict
        """

        # We don't serialize the full state dicts, instead they get pulled out
        # of the DB on the other side. However, the other side can't figure out
        # the prev_state_ids, so if we're a state event we include the event
        # id that we replaced in the state.
        if event.is_state():
            prev_state_id = self.prev_state_ids.get((event.type, event.state_key))
        else:
            prev_state_id = None

        return {
            "prev_state_id": prev_state_id,
            "event_type": event.type,
            "event_state_key": event.state_key if event.is_state() else None,
            "state_group": self.state_group,
            "rejected": self.rejected,
            "prev_group": self.prev_group,
            "delta_ids": _encode_state_dict(self.delta_ids),
            "prev_state_events": self.prev_state_events,
            "app_service_id": self.app_service.id if self.app_service else None
        }

    @staticmethod
    @defer.inlineCallbacks
    def deserialize(store, input):
        """Converts a dict that was produced by `serialize` back into a
        EventContext.

        Args:
            store (DataStore): Used to convert AS ID to AS object
            input (dict): A dict produced by `serialize`

        Returns:
            EventContext
        """
        context = EventContext()
        context.state_group = input["state_group"]
        context.rejected = input["rejected"]
        context.prev_group = input["prev_group"]
        context.delta_ids = _decode_state_dict(input["delta_ids"])
        context.prev_state_events = input["prev_state_events"]

        # We use the state_group and prev_state_id stuff to pull the
        # current_state_ids out of the DB and construct prev_state_ids.
        prev_state_id = input["prev_state_id"]
        event_type = input["event_type"]
        event_state_key = input["event_state_key"]

        context.current_state_ids = yield store.get_state_ids_for_group(
            context.state_group,
        )
        if prev_state_id and event_state_key:
            context.prev_state_ids = dict(context.current_state_ids)
            context.prev_state_ids[(event_type, event_state_key)] = prev_state_id
        else:
            context.prev_state_ids = context.current_state_ids

        app_service_id = input["app_service_id"]
        if app_service_id:
            context.app_service = store.get_app_service_by_id(app_service_id)

        defer.returnValue(context)


def _encode_state_dict(state_dict):
    """Since dicts of (type, state_key) -> event_id cannot be serialized in
    JSON we need to convert them to a form that can.
    """
    if state_dict is None:
        return None

    return [
        (etype, state_key, v)
        for (etype, state_key), v in state_dict.iteritems()
    ]


def _decode_state_dict(input):
    """Decodes a state dict encoded using `_encode_state_dict` above
    """
    if input is None:
        return None

    return frozendict({(etype, state_key,): v for etype, state_key, v in input})

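A quick round trip through the two helpers above, to make the encoding concrete (illustrative values; note the diff's `iteritems` marks it as Python 2 code, so `items` is used here instead):

    from frozendict import frozendict

    state_dict = {
        ("m.room.member", "@alice:example.com"): "$event_a",
        ("m.room.name", ""): "$event_b",
    }

    # Tuple keys cannot survive JSON, so they are flattened into triples...
    encoded = [
        (etype, state_key, v)
        for (etype, state_key), v in state_dict.items()
    ]
    # ...and rebuilt into an immutable mapping on the way back in.
    decoded = frozendict({(etype, state_key): v for etype, state_key, v in encoded})

    assert dict(decoded) == state_dict
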
@@ -14,7 +14,21 @@
# limitations under the License.


def check_event_for_spam(event):
class SpamChecker(object):
    def __init__(self, hs):
        self.spam_checker = None

        module = None
        config = None
        try:
            module, config = hs.config.spam_checker
        except Exception:
            pass

        if module is not None:
            self.spam_checker = module(config=config)

    def check_event_for_spam(self, event):
        """Checks if a given event is considered "spammy" by this server.

        If the server considers an event spammy, then it will be rejected if

@@ -27,12 +41,73 @@ def check_event_for_spam(event):
        Returns:
            bool: True if the event is spammy.
        """
        if not hasattr(event, "content") or "body" not in event.content:
        if self.spam_checker is None:
            return False

        # for example:
        #
        # if "the third flower is green" in event.content["body"]:
        #    return True
        return self.spam_checker.check_event_for_spam(event)

        return False
    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        """Checks if a given user may send an invite

        If this method returns false, the invite will be rejected.

        Args:
            userid (string): The sender's user ID

        Returns:
            bool: True if the user may send an invite, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_invite(inviter_userid, invitee_userid, room_id)

    def user_may_create_room(self, userid):
        """Checks if a given user may create a room

        If this method returns false, the creation request will be rejected.

        Args:
            userid (string): The sender's user ID

        Returns:
            bool: True if the user may create a room, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_create_room(userid)

    def user_may_create_room_alias(self, userid, room_alias):
        """Checks if a given user may create a room alias

        If this method returns false, the association request will be rejected.

        Args:
            userid (string): The sender's user ID
            room_alias (string): The alias to be created

        Returns:
            bool: True if the user may create a room alias, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_create_room_alias(userid, room_alias)

    def user_may_publish_room(self, userid, room_id):
        """Checks if a given user may publish a room to the directory

        If this method returns false, the publish request will be rejected.

        Args:
            userid (string): The sender's user ID
            room_id (string): The ID of the room that would be published

        Returns:
            bool: True if the user may publish the room, otherwise False
        """
        if self.spam_checker is None:
            return True

        return self.spam_checker.user_may_publish_room(userid, room_id)

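For reference, a module pluggable into this interface only needs to provide the callbacks the `SpamChecker` wrapper delegates to. A minimal sketch of such a module (the class name and option echo the commented-out example in synapse/config/spam_checker.py; everything here is illustrative, not a shipped API contract):

    class SuperSpamChecker(object):
        """Loaded via the `spam_checker` homeserver config option."""

        def __init__(self, config):
            # `config` is the dict under spam_checker.config in homeserver.yaml.
            self.example_option = config.get("example_option")

        def check_event_for_spam(self, event):
            # Returning True causes federation code to redact the event.
            body = getattr(event, "content", {}).get("body", "")
            return "the third flower is green" in body

        def user_may_invite(self, inviter_userid, invitee_userid, room_id):
            return True

        def user_may_create_room(self, userid):
            return True

        def user_may_create_room_alias(self, userid, room_alias):
            return True

        def user_may_publish_room(self, userid, room_id):
            return True
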
@@ -15,11 +15,3 @@

""" This package includes all the federation specific logic.
"""

from .replication import ReplicationLayer


def initialize_http_replication(hs):
    transport = hs.get_federation_transport_client()

    return ReplicationLayer(hs, transport)

@@ -16,8 +16,9 @@ import logging

from synapse.api.errors import SynapseError
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import spamcheck
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_request
from synapse.util import unwrapFirstError, logcontext
from twisted.internet import defer

@@ -26,7 +27,13 @@ logger = logging.getLogger(__name__)

class FederationBase(object):
    def __init__(self, hs):
        pass
        self.hs = hs

        self.server_name = hs.hostname
        self.keyring = hs.get_keyring()
        self.spam_checker = hs.get_spam_checker()
        self.store = hs.get_datastore()
        self._clock = hs.get_clock()

    @defer.inlineCallbacks
    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,

@@ -144,7 +151,7 @@ class FederationBase(object):
            )
            return redacted

        if spamcheck.check_event_for_spam(pdu):
        if self.spam_checker.check_event_for_spam(pdu):
            logger.warn(
                "Event contains spam, redacting %s: %s",
                pdu.event_id, pdu.get_pdu_json()

@@ -170,3 +177,28 @@ class FederationBase(object):
        )

        return deferreds


def event_from_pdu_json(pdu_json, outlier=False):
    """Construct a FrozenEvent from an event json received over federation

    Args:
        pdu_json (object): pdu as received over federation
        outlier (bool): True to mark this event as an outlier

    Returns:
        FrozenEvent

    Raises:
        SynapseError: if the pdu is missing required fields
    """
    # we could probably enforce a bunch of other fields here (room_id, sender,
    # origin, etc etc)
    assert_params_in_request(pdu_json, ('event_id', 'type'))
    event = FrozenEvent(
        pdu_json
    )

    event.internal_metadata.outlier = outlier

    return event

@@ -14,28 +14,28 @@
# limitations under the License.


from twisted.internet import defer

from .federation_base import FederationBase
from synapse.api.constants import Membership

from synapse.api.errors import (
    CodeMessageException, HttpResponseException, SynapseError,
)
from synapse.util import unwrapFirstError, logcontext
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logutils import log_function
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
from synapse.events import FrozenEvent, builder
import synapse.metrics

from synapse.util.retryutils import NotRetryingDestination

import copy
import itertools
import logging
import random

from twisted.internet import defer

from synapse.api.constants import Membership
from synapse.api.errors import (
    CodeMessageException, HttpResponseException, SynapseError, FederationDeniedError
)
from synapse.events import builder
from synapse.federation.federation_base import (
    FederationBase,
    event_from_pdu_json,
)
import synapse.metrics
from synapse.util import logcontext, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)

@@ -58,6 +58,7 @@ class FederationClient(FederationBase):
            self._clear_tried_cache, 60 * 1000,
        )
        self.state = hs.get_state_handler()
        self.transport_layer = hs.get_federation_transport_client()

    def _clear_tried_cache(self):
        """Clear pdu_destination_tried cache"""

@@ -184,7 +185,7 @@ class FederationClient(FederationBase):
        logger.debug("backfill transaction_data=%s", repr(transaction_data))

        pdus = [
            self.event_from_pdu_json(p, outlier=False)
            event_from_pdu_json(p, outlier=False)
            for p in transaction_data["pdus"]
        ]

@@ -244,7 +245,7 @@ class FederationClient(FederationBase):
        logger.debug("transaction_data %r", transaction_data)

        pdu_list = [
            self.event_from_pdu_json(p, outlier=outlier)
            event_from_pdu_json(p, outlier=outlier)
            for p in transaction_data["pdus"]
        ]

@@ -266,6 +267,9 @@ class FederationClient(FederationBase):
            except NotRetryingDestination as e:
                logger.info(e.message)
                continue
            except FederationDeniedError as e:
                logger.info(e.message)
                continue
            except Exception as e:
                pdu_attempts[destination] = now

@@ -336,11 +340,11 @@ class FederationClient(FederationBase):
        )

        pdus = [
            self.event_from_pdu_json(p, outlier=True) for p in result["pdus"]
            event_from_pdu_json(p, outlier=True) for p in result["pdus"]
        ]

        auth_chain = [
            self.event_from_pdu_json(p, outlier=True)
            event_from_pdu_json(p, outlier=True)
            for p in result.get("auth_chain", [])
        ]

@@ -420,7 +424,7 @@ class FederationClient(FederationBase):
            for e_id in batch
        ]

        res = yield preserve_context_over_deferred(
        res = yield make_deferred_yieldable(
            defer.DeferredList(deferreds, consumeErrors=True)
        )
        for success, result in res:

@@ -441,7 +445,7 @@ class FederationClient(FederationBase):
        )

        auth_chain = [
            self.event_from_pdu_json(p, outlier=True)
            event_from_pdu_json(p, outlier=True)
            for p in res["auth_chain"]
        ]

@@ -570,12 +574,12 @@ class FederationClient(FederationBase):
        logger.debug("Got content: %s", content)

        state = [
            self.event_from_pdu_json(p, outlier=True)
            event_from_pdu_json(p, outlier=True)
            for p in content.get("state", [])
        ]

        auth_chain = [
            self.event_from_pdu_json(p, outlier=True)
            event_from_pdu_json(p, outlier=True)
            for p in content.get("auth_chain", [])
        ]

@@ -650,7 +654,7 @@ class FederationClient(FederationBase):

        logger.debug("Got response to send_invite: %s", pdu_dict)

        pdu = self.event_from_pdu_json(pdu_dict)
        pdu = event_from_pdu_json(pdu_dict)

        # Check signatures are correct.
        pdu = yield self._check_sigs_and_hash(pdu)

@@ -740,7 +744,7 @@ class FederationClient(FederationBase):
        )

        auth_chain = [
            self.event_from_pdu_json(e)
            event_from_pdu_json(e)
            for e in content["auth_chain"]
        ]

@@ -788,7 +792,7 @@ class FederationClient(FederationBase):
        )

        events = [
            self.event_from_pdu_json(e)
            event_from_pdu_json(e)
            for e in content.get("events", [])
        ]

@@ -805,15 +809,6 @@ class FederationClient(FederationBase):

        defer.returnValue(signed_events)

    def event_from_pdu_json(self, pdu_json, outlier=False):
        event = FrozenEvent(
            pdu_json
        )

        event.internal_metadata.outlier = outlier

        return event

    @defer.inlineCallbacks
    def forward_third_party_invite(self, destinations, room_id, event_dict):
        for destination in destinations:

@@ -12,27 +12,30 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.internet import defer

from .federation_base import FederationBase
from .units import Transaction, Edu

from synapse.util.async import Linearizer
from synapse.util.logutils import log_function
from synapse.util.caches.response_cache import ResponseCache
from synapse.events import FrozenEvent
from synapse.types import get_domain_from_id
import synapse.metrics

from synapse.api.errors import AuthError, FederationError, SynapseError

from synapse.crypto.event_signing import compute_event_signature

import simplejson as json
import logging

import simplejson as json
from twisted.internet import defer

from synapse.api.errors import AuthError, FederationError, SynapseError, NotFoundError
from synapse.crypto.event_signing import compute_event_signature
from synapse.federation.federation_base import (
    FederationBase,
    event_from_pdu_json,
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
import synapse.metrics
from synapse.types import get_domain_from_id
from synapse.util import async
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logutils import log_function

# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

@@ -51,49 +54,19 @@ class FederationServer(FederationBase):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler

        self._server_linearizer = Linearizer("fed_server")
        self._server_linearizer = async.Linearizer("fed_server")
        self._transaction_linearizer = async.Linearizer("fed_txn_handler")

        self.transaction_actions = TransactionActions(self.store)

        self.registry = hs.get_federation_registry()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)

    def set_handler(self, handler):
        """Sets the handler that the replication layer will use to communicate
        receipt of new PDUs from other home servers. The required methods are
        documented on :py:class:`.ReplicationHandler`.
        """
        self.handler = handler

    def register_edu_handler(self, edu_type, handler):
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation Query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (callable): Invoked to handle incoming queries of this type

        handler is invoked as:
            result = handler(args)

        where 'args' is a dict mapping strings to strings of the query
        arguments. It should return a Deferred that will eventually yield an
        object to encode as JSON.
        """
        if query_type in self.query_handlers:
            raise KeyError(
                "Already have a Query handler for %s" % (query_type,)
            )

        self.query_handlers[query_type] = handler

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):

@@ -109,25 +82,41 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, transaction_data):
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()

        transaction = Transaction(**transaction_data)

        received_pdus_counter.inc_by(len(transaction.pdus))

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
                del p["age"]

        pdu_list = [
            self.event_from_pdu_json(p) for p in transaction.pdus
        ]
        if not transaction.transaction_id:
            raise Exception("Transaction missing transaction_id")
        if not transaction.origin:
            raise Exception("Transaction missing origin")

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        # use a linearizer to ensure that we don't process the same transaction
        # multiple times in parallel.
        with (yield self._transaction_linearizer.queue(
            (transaction.origin, transaction.transaction_id),
        )):
            result = yield self._handle_incoming_transaction(
                transaction, request_time,
            )

        defer.returnValue(result)

    @defer.inlineCallbacks
    def _handle_incoming_transaction(self, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        response = yield self.transaction_actions.have_responded(transaction)

        if response:

@@ -140,42 +129,49 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        results = []
        received_pdus_counter.inc_by(len(transaction.pdus))

        for pdu in pdu_list:
            # check that it's actually being sent from a valid destination to
            # workaround bug #1753 in 0.18.5 and 0.18.6
            if transaction.origin != get_domain_from_id(pdu.event_id):
                # We continue to accept join events from any server; this is
                # necessary for the federation join dance to work correctly.
                # (When we join over federation, the "helper" server is
                # responsible for sending out the join event, rather than the
                # origin. See bug #1893).
                if not (
                    pdu.type == 'm.room.member' and
                    pdu.content and
                    pdu.content.get("membership", None) == 'join'
                ):
                    logger.info(
                        "Discarding PDU %s from invalid origin %s",
                        pdu.event_id, transaction.origin
                    )
                    continue
                else:
                    logger.info(
                        "Accepting join PDU %s from %s",
                        pdu.event_id, transaction.origin
                    )
        pdus_by_room = {}

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            event = event_from_pdu_json(p)
            room_id = event.room_id
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.
        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                try:
                    yield self._handle_received_pdu(transaction.origin, pdu)
                    results.append({})
                    yield self._handle_received_pdu(
                        transaction.origin, pdu
                    )
                    pdu_results[event_id] = {}
                except FederationError as e:
                    self.send_failure(e, transaction.origin)
                    results.append({"error": str(e)})
                    logger.warn("Error handling PDU %s: %s", event_id, e)
                    pdu_results[event_id] = {"error": str(e)}
                except Exception as e:
                    results.append({"error": str(e)})
                    logger.exception("Failed to handle PDU")
                    pdu_results[event_id] = {"error": str(e)}
                    logger.exception("Failed to handle PDU %s", event_id)

        yield async.concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(),
            TRANSACTION_CONCURRENCY_LIMIT,
        )

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):

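The `concurrently_execute(..., TRANSACTION_CONCURRENCY_LIMIT)` call above caps how many rooms are processed at once. A rough standalone sketch of that bounded fan-out pattern (schematic only, written with asyncio rather than synapse's Twisted utilities):

    import asyncio

    async def concurrently_execute(func, args, limit):
        # Run func over every item in args, with at most `limit` calls in flight:
        # `limit` workers drain a shared iterator until it is exhausted.
        it = iter(args)

        async def worker():
            for arg in it:
                await func(arg)

        await asyncio.gather(*(worker() for _ in range(limit)))

    async def main():
        async def handle(room_id):
            print("processing", room_id)
            await asyncio.sleep(0.1)

        await concurrently_execute(handle, ["!a", "!b", "!c", "!d"], limit=2)

    asyncio.run(main())
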
@@ -185,17 +181,16 @@ class FederationServer(FederationBase):
                    edu.content
                )

        for failure in getattr(transaction, "pdu_failures", []):
        pdu_failures = getattr(transaction, "pdu_failures", [])
        for failure in pdu_failures:
            logger.info("Got failure %r", failure)

        logger.debug("Returning: %s", str(results))

        response = {
            "pdus": dict(zip(
                (p.event_id for p in pdu_list), results
            )),
            "pdus": pdu_results,
        }

        logger.debug("Returning: %s", str(response))

        yield self.transaction_actions.set_response(
            transaction,
            200, response

@@ -205,16 +200,7 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    def received_edu(self, origin, edu_type, content):
        received_edus_counter.inc()

        if edu_type in self.edu_handlers:
            try:
                yield self.edu_handlers[edu_type](origin, content)
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception as e:
                logger.exception("Failed to handle edu %r", edu_type)
        else:
            logger.warn("Received EDU of type %s with no handler", edu_type)
        yield self.registry.on_edu(edu_type, origin, content)

    @defer.inlineCallbacks
    @log_function

@@ -229,12 +215,13 @@ class FederationServer(FederationBase):
        result = self._state_resp_cache.get((room_id, event_id))
        if not result:
            with (yield self._server_linearizer.queue((origin, room_id))):
                resp = yield self._state_resp_cache.set(
                d = self._state_resp_cache.set(
                    (room_id, event_id),
                    self._on_context_state_request_compute(room_id, event_id)
                    preserve_fn(self._on_context_state_request_compute)(room_id, event_id)
                )
                resp = yield make_deferred_yieldable(d)
        else:
            resp = yield result
            resp = yield make_deferred_yieldable(result)

        defer.returnValue((200, resp))

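The ResponseCache get/set dance above deduplicates concurrent identical requests: the first caller computes the result, everyone else awaits the same in-flight work. A schematic analogue of the idea (plain asyncio, not synapse's actual ResponseCache API; the 30-second result expiry is omitted):

    import asyncio

    class InflightCache:
        def __init__(self):
            self._inflight = {}

        async def wrap(self, key, compute):
            # If a computation for this key is already running, share its result.
            task = self._inflight.get(key)
            if task is None:
                task = asyncio.ensure_future(compute())
                self._inflight[key] = task
                # Drop the entry once done (a real cache would keep it briefly).
                task.add_done_callback(lambda _: self._inflight.pop(key, None))
            return await task
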
@@ -303,14 +290,8 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.inc(query_type)

        if query_type in self.query_handlers:
            response = yield self.query_handlers[query_type](args)
            defer.returnValue((200, response))
        else:
            defer.returnValue(
                (404, "No handler for Query type '%s'" % (query_type,))
            )
        resp = yield self.registry.on_query(query_type, args)
        defer.returnValue((200, resp))

    @defer.inlineCallbacks
    def on_make_join_request(self, room_id, user_id):

@@ -320,7 +301,7 @@ class FederationServer(FederationBase):

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content):
        pdu = self.event_from_pdu_json(content)
        pdu = event_from_pdu_json(content)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))

@@ -328,7 +309,7 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    def on_send_join_request(self, origin, content):
        logger.debug("on_send_join_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        pdu = event_from_pdu_json(content)
        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()

@@ -348,7 +329,7 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    def on_send_leave_request(self, origin, content):
        logger.debug("on_send_leave_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        pdu = event_from_pdu_json(content)
        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        yield self.handler.on_send_leave_request(origin, pdu)
        defer.returnValue((200, {}))

@@ -385,7 +366,7 @@ class FederationServer(FederationBase):
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            auth_chain = [
                self.event_from_pdu_json(e)
                event_from_pdu_json(e)
                for e in content["auth_chain"]
            ]

@@ -520,6 +501,30 @@ class FederationServer(FederationBase):
        Returns (Deferred): completes with None
        Raises: FederationError if the signatures / hash do not match
        """
        # check that it's actually being sent from a valid destination to
        # workaround bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.event_id):
            # We continue to accept join events from any server; this is
            # necessary for the federation join dance to work correctly.
            # (When we join over federation, the "helper" server is
            # responsible for sending out the join event, rather than the
            # origin. See bug #1893).
            if not (
                pdu.type == 'm.room.member' and
                pdu.content and
                pdu.content.get("membership", None) == 'join'
            ):
                logger.info(
                    "Discarding PDU %s from invalid origin %s",
                    pdu.event_id, origin
                )
                return
            else:
                logger.info(
                    "Accepting join PDU %s from %s",
                    pdu.event_id, origin
                )

        # Check signature.
        try:
            pdu = yield self._check_sigs_and_hash(pdu)

@@ -536,15 +541,6 @@ class FederationServer(FederationBase):
    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    def event_from_pdu_json(self, pdu_json, outlier=False):
        event = FrozenEvent(
            pdu_json
        )

        event.internal_metadata.outlier = outlier

        return event

    @defer.inlineCallbacks
    def exchange_third_party_invite(
        self,

@@ -567,3 +563,66 @@ class FederationServer(FederationBase):
            origin, room_id, event_dict
        )
        defer.returnValue(ret)


class FederationHandlerRegistry(object):
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
    """
    def __init__(self):
        self.edu_handlers = {}
        self.query_handlers = {}

    def register_edu_handler(self, edu_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

        Args:
            edu_type (str): The type of the incoming EDU to register handler for
            handler (Callable[[str, dict]]): A callable invoked on incoming EDU
                of the given type. The arguments are the origin server name and
                the EDU contents.
        """
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (Callable[[dict], Deferred[dict]]): Invoked to handle
                incoming queries of this type. The return will be yielded
                on and the result used as the response to the query request.
        """
        if query_type in self.query_handlers:
            raise KeyError(
                "Already have a Query handler for %s" % (query_type,)
            )

        self.query_handlers[query_type] = handler

    @defer.inlineCallbacks
    def on_edu(self, edu_type, origin, content):
        handler = self.edu_handlers.get(edu_type)
        if not handler:
            logger.warn("No handler registered for EDU type %s", edu_type)

        try:
            yield handler(origin, content)
        except SynapseError as e:
            logger.info("Failed to handle edu %r: %r", edu_type, e)
        except Exception as e:
            logger.exception("Failed to handle edu %r", edu_type)

    def on_query(self, query_type, args):
        handler = self.query_handlers.get(query_type)
        if not handler:
            logger.warn("No handler registered for query type %s", query_type)
            raise NotFoundError("No handler for Query type '%s'" % (query_type,))

        return handler(args)

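Typical registration against this registry, per its docstrings (a minimal sketch; the handler bodies, the `process_receipts` helper, and the choice of EDU/query types are illustrative only):

    from twisted.internet import defer

    registry = FederationHandlerRegistry()

    @defer.inlineCallbacks
    def on_receipt_edu(origin, content):
        # origin is the sending server's name, content the EDU payload.
        yield process_receipts(origin, content)  # hypothetical helper

    def on_profile_query(args):
        # Must return a Deferred yielding a JSON-encodable dict.
        return defer.succeed({"displayname": args.get("user_id", "")})

    registry.register_edu_handler("m.receipt", on_receipt_edu)
    registry.register_query_handler("profile", on_profile_query)
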
@@ -1,73 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This layer is responsible for replicating with remote home servers using
a given transport.
"""

from .federation_client import FederationClient
from .federation_server import FederationServer

from .persistence import TransactionActions

import logging


logger = logging.getLogger(__name__)


class ReplicationLayer(FederationClient, FederationServer):
    """This layer is responsible for replicating with remote home servers over
    the given transport. I.e., does the sending and receiving of PDUs to
    remote home servers.

    The layer communicates with the rest of the server via a registered
    ReplicationHandler.

    In more detail, the layer:
        * Receives incoming data and processes it into transactions and pdus.
        * Fetches any PDUs it thinks it might have missed.
        * Keeps the current state for contexts up to date by applying the
          suitable conflict resolution.
        * Sends outgoing pdus wrapped in transactions.
        * Fills out the references to previous pdus/transactions appropriately
          for outgoing data.
    """

    def __init__(self, hs, transport_layer):
        self.server_name = hs.hostname

        self.keyring = hs.get_keyring()

        self.transport_layer = transport_layer

        self.federation_client = self

        self.store = hs.get_datastore()

        self.handler = None
        self.edu_handlers = {}
        self.query_handlers = {}

        self._clock = hs.get_clock()

        self.transaction_actions = TransactionActions(self.store)

        self.hs = hs

        super(ReplicationLayer, self).__init__(hs)

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

@@ -19,9 +19,9 @@ from twisted.internet import defer

from .persistence import TransactionActions
from .units import Transaction, Edu

from synapse.api.errors import HttpResponseException
from synapse.api.errors import HttpResponseException, FederationDeniedError
from synapse.util import logcontext, PreserveLoggingContext
from synapse.util.async import run_on_reactor
from synapse.util.logcontext import preserve_context_over_fn, preserve_fn
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from synapse.util.metrics import measure_func
from synapse.handlers.presence import format_user_presence_state, get_interested_remotes

@@ -42,6 +42,8 @@ sent_edus_counter = client_metrics.register_counter("sent_edus")

sent_transactions_counter = client_metrics.register_counter("sent_transactions")

events_processed_counter = client_metrics.register_counter("events_processed")


class TransactionQueue(object):
    """This class makes sure we only have one transaction in flight at

@@ -146,7 +148,6 @@ class TransactionQueue(object):
        else:
            return not destination.startswith("localhost")

    @defer.inlineCallbacks
    def notify_new_events(self, current_id):
        """This gets called when we have some new events we might want to
        send out to other servers.

@@ -156,6 +157,13 @@ class TransactionQueue(object):
        if self._is_processing:
            return

        # fire off a processing loop in the background. It's likely it will
        # outlast the current request, so run it in the sentinel logcontext.
        with PreserveLoggingContext():
            self._process_event_queue_loop()

    @defer.inlineCallbacks
    def _process_event_queue_loop(self):
        try:
            self._is_processing = True
            while True:

@@ -199,6 +207,8 @@ class TransactionQueue(object):

                self._send_pdu(event, destinations)

            events_processed_counter.inc_by(len(events))

            yield self.store.update_federation_out_pos(
                "events", next_token
            )

@@ -231,11 +241,9 @@ class TransactionQueue(object):
                (pdu, order)
            )

        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )
        self._attempt_new_transaction(destination)

    @preserve_fn  # the caller should not yield on this
    @logcontext.preserve_fn  # the caller should not yield on this
    @defer.inlineCallbacks
    def send_presence(self, states):
        """Send the new presence states to the appropriate destinations.

@@ -299,7 +307,7 @@ class TransactionQueue(object):
                state.user_id: state for state in states
            })

            preserve_fn(self._attempt_new_transaction)(destination)
            self._attempt_new_transaction(destination)

    def send_edu(self, destination, edu_type, content, key=None):
        edu = Edu(

@@ -321,9 +329,7 @@ class TransactionQueue(object):
        else:
            self.pending_edus_by_dest.setdefault(destination, []).append(edu)

        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )
        self._attempt_new_transaction(destination)

    def send_failure(self, failure, destination):
        if destination == self.server_name or destination == "localhost":

@@ -336,9 +342,7 @@ class TransactionQueue(object):
            destination, []
        ).append(failure)

        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )
        self._attempt_new_transaction(destination)

    def send_device_messages(self, destination):
        if destination == self.server_name or destination == "localhost":

@@ -347,15 +351,24 @@ class TransactionQueue(object):
        if not self.can_send_to(destination):
            return

        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )
        self._attempt_new_transaction(destination)

    def get_current_token(self):
        return 0

    @defer.inlineCallbacks
    def _attempt_new_transaction(self, destination):
        """Try to start a new transaction to this destination

        If there is already a transaction in progress to this destination,
        returns immediately. Otherwise kicks off the process of sending a
        transaction in the background.

        Args:
            destination (str):

        Returns:
            None
        """
        # list of (pending_pdu, deferred, order)
        if destination in self.pending_transactions:
            # XXX: pending_transactions can get stuck on by a never-ending

@@ -368,6 +381,19 @@ class TransactionQueue(object):
            )
            return

        logger.debug("TX [%s] Starting transaction loop", destination)

        # Drop the logcontext before starting the transaction. It doesn't
        # really make sense to log all the outbound transactions against
        # whatever path led us to this point: that's pretty arbitrary really.
        #
        # (this also means we can fire off _perform_transaction without
        # yielding)
        with logcontext.PreserveLoggingContext():
            self._transaction_transmission_loop(destination)

    @defer.inlineCallbacks
    def _transaction_transmission_loop(self, destination):
        pending_pdus = []
        try:
            self.pending_transactions[destination] = 1

@@ -464,6 +490,8 @@ class TransactionQueue(object):
                    (e.retry_last_ts + e.retry_interval) / 1000.0
                ),
            )
        except FederationDeniedError as e:
            logger.info(e)
        except Exception as e:
            logger.warn(
                "TX [%s] Failed to send transaction: %s",

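Several hunks above replace `preserve_context_over_fn(f, x)` with a plain `f(x)` wrapped in an explicit `PreserveLoggingContext()` inside the callee. The shape of that pattern, schematically (this mimics the structure of the diff only; it is not synapse's real logcontext implementation):

    class PreserveLoggingContext(object):
        """Schematic stand-in: switch to the sentinel logging context on
        entry and restore the previous one on exit."""

        def __enter__(self):
            # ... swap the thread-local logging context for the sentinel ...
            return self

        def __exit__(self, exc_type, exc, tb):
            # ... restore the previous logging context ...
            pass

    def kick_off_background_send(destination, transmission_loop):
        # The background loop will outlive the current request, so it must not
        # run in (and thereby keep alive) the request's logging context.
        with PreserveLoggingContext():
            transmission_loop(destination)
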
@@ -212,6 +212,9 @@ class TransportLayerClient(object):

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.

        Fails with ``FederationDeniedError`` if the remote destination
        is not in our federation whitelist
        """
        valid_memberships = {Membership.JOIN, Membership.LEAVE}
        if membership not in valid_memberships:

@@ -471,3 +474,418 @@ class TransportLayerClient(object):
        )

        defer.returnValue(content)

    @log_function
    def get_group_profile(self, destination, group_id, requester_user_id):
        """Get a group profile
        """
        path = PREFIX + "/groups/%s/profile" % (group_id,)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def update_group_profile(self, destination, group_id, requester_user_id, content):
        """Update a remote group profile

        Args:
            destination (str)
            group_id (str)
            requester_user_id (str)
            content (dict): The new profile of the group
        """
        path = PREFIX + "/groups/%s/profile" % (group_id,)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def get_group_summary(self, destination, group_id, requester_user_id):
        """Get a group summary
        """
        path = PREFIX + "/groups/%s/summary" % (group_id,)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_rooms_in_group(self, destination, group_id, requester_user_id):
        """Get all rooms in a group
        """
        path = PREFIX + "/groups/%s/rooms" % (group_id,)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    def add_room_to_group(self, destination, group_id, requester_user_id, room_id,
                          content):
        """Add a room to a group
        """
        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    def update_room_in_group(self, destination, group_id, requester_user_id, room_id,
                             config_key, content):
        """Update room in group
        """
        path = PREFIX + "/groups/%s/room/%s/config/%s" % (group_id, room_id, config_key,)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
        """Remove a room from a group
        """
        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)

        return self.client.delete_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_users_in_group(self, destination, group_id, requester_user_id):
        """Get users in a group
        """
        path = PREFIX + "/groups/%s/users" % (group_id,)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_invited_users_in_group(self, destination, group_id, requester_user_id):
        """Get users that have been invited to a group
        """
        path = PREFIX + "/groups/%s/invited_users" % (group_id,)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def accept_group_invite(self, destination, group_id, user_id, content):
        """Accept a group invite
        """
        path = PREFIX + "/groups/%s/users/%s/accept_invite" % (group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
        """Invite a user to a group
        """
        path = PREFIX + "/groups/%s/users/%s/invite" % (group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def invite_to_group_notification(self, destination, group_id, user_id, content):
        """Sent by group server to inform a user's server that they have been
        invited.
        """

        path = PREFIX + "/groups/local/%s/users/%s/invite" % (group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def remove_user_from_group(self, destination, group_id, requester_user_id,
                               user_id, content):
        """Remove a user fron a group
        """
        path = PREFIX + "/groups/%s/users/%s/remove" % (group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def remove_user_from_group_notification(self, destination, group_id, user_id,
                                            content):
        """Sent by group server to inform a user's server that they have been
        kicked from the group.
        """

        path = PREFIX + "/groups/local/%s/users/%s/remove" % (group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def renew_group_attestation(self, destination, group_id, user_id, content):
        """Sent by either a group server or a user's server to periodically update
        the attestations
        """

        path = PREFIX + "/groups/%s/renew_attestation/%s" % (group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def update_group_summary_room(self, destination, group_id, user_id, room_id,
                                  category_id, content):
        """Update a room entry in a group summary
        """
        if category_id:
            path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % (
                group_id, category_id, room_id,
            )
        else:
            path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,)
|
||||
|
||||
return self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def delete_group_summary_room(self, destination, group_id, user_id, room_id,
|
||||
category_id):
|
||||
"""Delete a room entry in a group summary
|
||||
"""
|
||||
if category_id:
|
||||
path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % (
|
||||
group_id, category_id, room_id,
|
||||
)
|
||||
else:
|
||||
path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,)
|
||||
|
||||
return self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def get_group_categories(self, destination, group_id, requester_user_id):
|
||||
"""Get all categories in a group
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/categories" % (group_id,)
|
||||
|
||||
return self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def get_group_category(self, destination, group_id, requester_user_id, category_id):
|
||||
"""Get category info in a group
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
|
||||
|
||||
return self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def update_group_category(self, destination, group_id, requester_user_id, category_id,
|
||||
content):
|
||||
"""Update a category in a group
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
|
||||
|
||||
return self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def delete_group_category(self, destination, group_id, requester_user_id,
|
||||
category_id):
|
||||
"""Delete a category in a group
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
|
||||
|
||||
return self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def get_group_roles(self, destination, group_id, requester_user_id):
|
||||
"""Get all roles in a group
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/roles" % (group_id,)
|
||||
|
||||
return self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def get_group_role(self, destination, group_id, requester_user_id, role_id):
|
||||
"""Get a roles info
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
|
||||
|
||||
return self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def update_group_role(self, destination, group_id, requester_user_id, role_id,
|
||||
content):
|
||||
"""Update a role in a group
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
|
||||
|
||||
return self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def delete_group_role(self, destination, group_id, requester_user_id, role_id):
|
||||
"""Delete a role in a group
|
||||
"""
|
||||
path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
|
||||
|
||||
return self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def update_group_summary_user(self, destination, group_id, requester_user_id,
|
||||
user_id, role_id, content):
|
||||
"""Update a users entry in a group
|
||||
"""
|
||||
if role_id:
|
||||
path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
|
||||
group_id, role_id, user_id,
|
||||
)
|
||||
else:
|
||||
path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
|
||||
|
||||
return self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def delete_group_summary_user(self, destination, group_id, requester_user_id,
|
||||
user_id, role_id):
|
||||
"""Delete a users entry in a group
|
||||
"""
|
||||
if role_id:
|
||||
path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
|
||||
group_id, role_id, user_id,
|
||||
)
|
||||
else:
|
||||
path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
|
||||
|
||||
return self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
def bulk_get_publicised_groups(self, destination, user_ids):
|
||||
"""Get the groups a list of users are publicising
|
||||
"""
|
||||
|
||||
path = PREFIX + "/get_groups_publicised"
|
||||
|
||||
content = {"user_ids": user_ids}
|
||||
|
||||
return self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.api.errors import Codes, SynapseError, FederationDeniedError
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.http.servlet import (
|
||||
parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
|
||||
@@ -25,7 +25,7 @@ from synapse.http.servlet import (
|
||||
from synapse.util.ratelimitutils import FederationRateLimiter
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.util.logcontext import preserve_fn
|
||||
from synapse.types import ThirdPartyInstanceID
|
||||
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
|
||||
|
||||
import functools
|
||||
import logging
|
||||
@@ -81,6 +81,7 @@ class Authenticator(object):
|
||||
self.keyring = hs.get_keyring()
|
||||
self.server_name = hs.hostname
|
||||
self.store = hs.get_datastore()
|
||||
self.federation_domain_whitelist = hs.config.federation_domain_whitelist
|
||||
|
||||
# A method just so we can pass 'self' as the authenticator to the Servlets
|
||||
@defer.inlineCallbacks
|
||||
@@ -92,6 +93,12 @@ class Authenticator(object):
|
||||
"signatures": {},
|
||||
}
|
||||
|
||||
if (
|
||||
self.federation_domain_whitelist is not None and
|
||||
self.server_name not in self.federation_domain_whitelist
|
||||
):
|
||||
raise FederationDeniedError(self.server_name)
|
||||
|
||||
if content is not None:
|
||||
json_request["content"] = content
|
||||
|
||||
@@ -112,7 +119,7 @@ class Authenticator(object):
|
||||
key = strip_quotes(param_dict["key"])
|
||||
sig = strip_quotes(param_dict["sig"])
|
||||
return (origin, key, sig)
|
||||
except:
|
||||
except Exception:
|
||||
raise AuthenticationError(
|
||||
400, "Malformed Authorization header", Codes.UNAUTHORIZED
|
||||
)
|
||||
@@ -177,7 +184,7 @@ class BaseFederationServlet(object):
|
||||
if self.REQUIRE_AUTH:
|
||||
logger.exception("authenticate_request failed")
|
||||
raise
|
||||
except:
|
||||
except Exception:
|
||||
logger.exception("authenticate_request failed")
|
||||
raise
|
||||
|
||||
@@ -270,7 +277,7 @@ class FederationSendServlet(BaseFederationServlet):
|
||||
code, response = yield self.handler.on_incoming_transaction(
|
||||
transaction_data
|
||||
)
|
||||
except:
|
||||
except Exception:
|
||||
logger.exception("on_incoming_transaction failed")
|
||||
raise
|
||||
|
||||
@@ -609,6 +616,514 @@ class FederationVersionServlet(BaseFederationServlet):
|
||||
}))
|
||||
|
||||
|
||||
class FederationGroupsProfileServlet(BaseFederationServlet):
|
||||
"""Get/set the basic profile of a group on behalf of a user
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/profile$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.get_group_profile(
|
||||
group_id, requester_user_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.update_group_profile(
|
||||
group_id, requester_user_id, content
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsSummaryServlet(BaseFederationServlet):
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/summary$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.get_group_summary(
|
||||
group_id, requester_user_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsRoomsServlet(BaseFederationServlet):
|
||||
"""Get the rooms in a group on behalf of a user
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/rooms$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.get_rooms_in_group(
|
||||
group_id, requester_user_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsAddRoomsServlet(BaseFederationServlet):
|
||||
"""Add/remove room from group
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, room_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.add_room_to_group(
|
||||
group_id, requester_user_id, room_id, content
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_DELETE(self, origin, content, query, group_id, room_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.remove_room_from_group(
|
||||
group_id, requester_user_id, room_id,
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
|
||||
"""Update room config in group
|
||||
"""
|
||||
PATH = (
|
||||
"/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
|
||||
"/config/(?P<config_key>[^/]*)$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, room_id, config_key):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
result = yield self.groups_handler.update_room_in_group(
|
||||
group_id, requester_user_id, room_id, config_key, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, result))
|
||||
|
||||
|
||||
class FederationGroupsUsersServlet(BaseFederationServlet):
|
||||
"""Get the users in a group on behalf of a user
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/users$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.get_users_in_group(
|
||||
group_id, requester_user_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
|
||||
"""Get the users that have been invited to a group
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/invited_users$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.get_invited_users_in_group(
|
||||
group_id, requester_user_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsInviteServlet(BaseFederationServlet):
|
||||
"""Ask a group server to invite someone to the group
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, user_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.invite_to_group(
|
||||
group_id, user_id, requester_user_id, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
|
||||
"""Accept an invitation from the group server
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, user_id):
|
||||
if get_domain_from_id(user_id) != origin:
|
||||
raise SynapseError(403, "user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.accept_invite(
|
||||
group_id, user_id, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsRemoveUserServlet(BaseFederationServlet):
|
||||
"""Leave or kick a user from the group
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, user_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.remove_user_from_group(
|
||||
group_id, user_id, requester_user_id, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsLocalInviteServlet(BaseFederationServlet):
|
||||
"""A group server has invited a local user
|
||||
"""
|
||||
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, user_id):
|
||||
if get_domain_from_id(group_id) != origin:
|
||||
raise SynapseError(403, "group_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.on_invite(
|
||||
group_id, user_id, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
|
||||
"""A group server has removed a local user
|
||||
"""
|
||||
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, user_id):
|
||||
if get_domain_from_id(group_id) != origin:
|
||||
raise SynapseError(403, "user_id doesn't match origin")
|
||||
|
||||
new_content = yield self.handler.user_removed_from_group(
|
||||
group_id, user_id, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
|
||||
"""A group or user's server renews their attestation
|
||||
"""
|
||||
PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)$"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, user_id):
|
||||
# We don't need to check auth here as we check the attestation signatures
|
||||
|
||||
new_content = yield self.handler.on_renew_attestation(
|
||||
group_id, user_id, content
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
|
||||
|
||||
class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):
|
||||
"""Add/remove a room from the group summary, with optional category.
|
||||
|
||||
Matches both:
|
||||
- /groups/:group/summary/rooms/:room_id
|
||||
- /groups/:group/summary/categories/:category/rooms/:room_id
|
||||
"""
|
||||
PATH = (
|
||||
"/groups/(?P<group_id>[^/]*)/summary"
|
||||
"(/categories/(?P<category_id>[^/]+))?"
|
||||
"/rooms/(?P<room_id>[^/]*)$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, category_id, room_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if category_id == "":
|
||||
raise SynapseError(400, "category_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.update_group_summary_room(
|
||||
group_id, requester_user_id,
|
||||
room_id=room_id,
|
||||
category_id=category_id,
|
||||
content=content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_DELETE(self, origin, content, query, group_id, category_id, room_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if category_id == "":
|
||||
raise SynapseError(400, "category_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.delete_group_summary_room(
|
||||
group_id, requester_user_id,
|
||||
room_id=room_id,
|
||||
category_id=category_id,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
|
||||
class FederationGroupsCategoriesServlet(BaseFederationServlet):
|
||||
"""Get all categories for a group
|
||||
"""
|
||||
PATH = (
|
||||
"/groups/(?P<group_id>[^/]*)/categories/$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
resp = yield self.handler.get_group_categories(
|
||||
group_id, requester_user_id,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
|
||||
class FederationGroupsCategoryServlet(BaseFederationServlet):
|
||||
"""Add/remove/get a category in a group
|
||||
"""
|
||||
PATH = (
|
||||
"/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id, category_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
resp = yield self.handler.get_group_category(
|
||||
group_id, requester_user_id, category_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, category_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if category_id == "":
|
||||
raise SynapseError(400, "category_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.upsert_group_category(
|
||||
group_id, requester_user_id, category_id, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_DELETE(self, origin, content, query, group_id, category_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if category_id == "":
|
||||
raise SynapseError(400, "category_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.delete_group_category(
|
||||
group_id, requester_user_id, category_id,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
|
||||
class FederationGroupsRolesServlet(BaseFederationServlet):
|
||||
"""Get roles in a group
|
||||
"""
|
||||
PATH = (
|
||||
"/groups/(?P<group_id>[^/]*)/roles/$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
resp = yield self.handler.get_group_roles(
|
||||
group_id, requester_user_id,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
|
||||
class FederationGroupsRoleServlet(BaseFederationServlet):
|
||||
"""Add/remove/get a role in a group
|
||||
"""
|
||||
PATH = (
|
||||
"/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, group_id, role_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
resp = yield self.handler.get_group_role(
|
||||
group_id, requester_user_id, role_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, role_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if role_id == "":
|
||||
raise SynapseError(400, "role_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.update_group_role(
|
||||
group_id, requester_user_id, role_id, content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_DELETE(self, origin, content, query, group_id, role_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if role_id == "":
|
||||
raise SynapseError(400, "role_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.delete_group_role(
|
||||
group_id, requester_user_id, role_id,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
|
||||
class FederationGroupsSummaryUsersServlet(BaseFederationServlet):
|
||||
"""Add/remove a user from the group summary, with optional role.
|
||||
|
||||
Matches both:
|
||||
- /groups/:group/summary/users/:user_id
|
||||
- /groups/:group/summary/roles/:role/users/:user_id
|
||||
"""
|
||||
PATH = (
|
||||
"/groups/(?P<group_id>[^/]*)/summary"
|
||||
"(/roles/(?P<role_id>[^/]+))?"
|
||||
"/users/(?P<user_id>[^/]*)$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, group_id, role_id, user_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if role_id == "":
|
||||
raise SynapseError(400, "role_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.update_group_summary_user(
|
||||
group_id, requester_user_id,
|
||||
user_id=user_id,
|
||||
role_id=role_id,
|
||||
content=content,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_DELETE(self, origin, content, query, group_id, role_id, user_id):
|
||||
requester_user_id = parse_string_from_args(query, "requester_user_id")
|
||||
if get_domain_from_id(requester_user_id) != origin:
|
||||
raise SynapseError(403, "requester_user_id doesn't match origin")
|
||||
|
||||
if role_id == "":
|
||||
raise SynapseError(400, "role_id cannot be empty string")
|
||||
|
||||
resp = yield self.handler.delete_group_summary_user(
|
||||
group_id, requester_user_id,
|
||||
user_id=user_id,
|
||||
role_id=role_id,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
|
||||
class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
|
||||
"""Get roles in a group
|
||||
"""
|
||||
PATH = (
|
||||
"/get_groups_publicised$"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query):
|
||||
resp = yield self.handler.bulk_get_publicised_groups(
|
||||
content["user_ids"], proxy=False,
|
||||
)
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
|
||||
FEDERATION_SERVLET_CLASSES = (
|
||||
FederationSendServlet,
|
||||
FederationPullServlet,
|
||||
@@ -635,15 +1150,47 @@ FEDERATION_SERVLET_CLASSES = (
|
||||
FederationVersionServlet,
|
||||
)
|
||||
|
||||
|
||||
ROOM_LIST_CLASSES = (
|
||||
PublicRoomList,
|
||||
)
|
||||
|
||||
GROUP_SERVER_SERVLET_CLASSES = (
|
||||
FederationGroupsProfileServlet,
|
||||
FederationGroupsSummaryServlet,
|
||||
FederationGroupsRoomsServlet,
|
||||
FederationGroupsUsersServlet,
|
||||
FederationGroupsInvitedUsersServlet,
|
||||
FederationGroupsInviteServlet,
|
||||
FederationGroupsAcceptInviteServlet,
|
||||
FederationGroupsRemoveUserServlet,
|
||||
FederationGroupsSummaryRoomsServlet,
|
||||
FederationGroupsCategoriesServlet,
|
||||
FederationGroupsCategoryServlet,
|
||||
FederationGroupsRolesServlet,
|
||||
FederationGroupsRoleServlet,
|
||||
FederationGroupsSummaryUsersServlet,
|
||||
FederationGroupsAddRoomsServlet,
|
||||
FederationGroupsAddRoomsConfigServlet,
|
||||
)
|
||||
|
||||
|
||||
GROUP_LOCAL_SERVLET_CLASSES = (
|
||||
FederationGroupsLocalInviteServlet,
|
||||
FederationGroupsRemoveLocalUserServlet,
|
||||
FederationGroupsBulkPublicisedServlet,
|
||||
)
|
||||
|
||||
|
||||
GROUP_ATTESTATION_SERVLET_CLASSES = (
|
||||
FederationGroupsRenewAttestaionServlet,
|
||||
)
|
||||
|
||||
|
||||
def register_servlets(hs, resource, authenticator, ratelimiter):
|
||||
for servletclass in FEDERATION_SERVLET_CLASSES:
|
||||
servletclass(
|
||||
handler=hs.get_replication_layer(),
|
||||
handler=hs.get_federation_server(),
|
||||
authenticator=authenticator,
|
||||
ratelimiter=ratelimiter,
|
||||
server_name=hs.hostname,
|
||||
@@ -656,3 +1203,27 @@ def register_servlets(hs, resource, authenticator, ratelimiter):
|
||||
ratelimiter=ratelimiter,
|
||||
server_name=hs.hostname,
|
||||
).register(resource)
|
||||
|
||||
for servletclass in GROUP_SERVER_SERVLET_CLASSES:
|
||||
servletclass(
|
||||
handler=hs.get_groups_server_handler(),
|
||||
authenticator=authenticator,
|
||||
ratelimiter=ratelimiter,
|
||||
server_name=hs.hostname,
|
||||
).register(resource)
|
||||
|
||||
for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
|
||||
servletclass(
|
||||
handler=hs.get_groups_local_handler(),
|
||||
authenticator=authenticator,
|
||||
ratelimiter=ratelimiter,
|
||||
server_name=hs.hostname,
|
||||
).register(resource)
|
||||
|
||||
for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
|
||||
servletclass(
|
||||
handler=hs.get_groups_attestation_renewer(),
|
||||
authenticator=authenticator,
|
||||
ratelimiter=ratelimiter,
|
||||
server_name=hs.hostname,
|
||||
).register(resource)
|
||||
|
||||
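As an aside, the two "summary" servlets above each fold an optional path segment into a single PATH regex. A standalone sketch, not part of the diff, of how that pattern behaves (plain stdlib re; the group, category, and room identifiers are made up, and the federation PREFIX that BaseFederationServlet prepends before compiling is omitted):

import re

# Same shape as FederationGroupsSummaryRoomsServlet.PATH above.
PATTERN = re.compile(
    "/groups/(?P<group_id>[^/]*)/summary"
    "(/categories/(?P<category_id>[^/]+))?"
    "/rooms/(?P<room_id>[^/]*)$"
)

# Short form: the optional categories segment is absent, so category_id
# comes back as None and the handler takes its "no category" branch.
m = PATTERN.search("/groups/+g:ex/summary/rooms/!r:ex")
assert m.groupdict()["category_id"] is None

# Long form: category_id is captured. The [^/]+ inside the optional group
# means an empty category can never match here; the explicit
# category_id == "" checks in the handlers are defensive.
m = PATTERN.search("/groups/+g:ex/summary/categories/cat1/rooms/!r:ex")
assert m.groupdict() == {
    "group_id": "+g:ex", "category_id": "cat1", "room_id": "!r:ex",
}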
0    synapse/groups/__init__.py    Normal file
195  synapse/groups/attestations.py    Normal file
@@ -0,0 +1,195 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Attestations ensure that users and groups can't lie about their memberships.

When a user joins a group the HS and GS swap attestations, which allow them
both to independently prove to third parties their membership. These
attestations have a validity period so need to be periodically renewed.

If a user leaves (or gets kicked out of) a group, either side can still use
their attestation to "prove" their membership, until the attestation expires.
Therefore attestations shouldn't be relied on to prove membership in important
cases, but can for less important situations, e.g. showing a user's membership
of groups on their profile, showing flairs, etc.

An attestation is a signed blob of json that looks like:

    {
        "user_id": "@foo:a.example.com",
        "group_id": "+bar:b.example.com",
        "valid_until_ms": 1507994728530,
        "signatures": {"matrix.org": {"ed25519:auto": "..."}}
    }
"""

import logging
import random

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.types import get_domain_from_id
from synapse.util.logcontext import preserve_fn

from signedjson.sign import sign_json


logger = logging.getLogger(__name__)


# Default validity duration for new attestations we create
DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000

# We add some jitter to the validity duration of attestations so that if we
# add lots of users at once we don't need to renew them all at once.
# The jitter is a multiplier picked randomly between the first and second number
DEFAULT_ATTESTATION_JITTER = (0.9, 1.3)

# Start trying to update our attestations when they come this close to expiring
UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000


class GroupAttestationSigning(object):
    """Creates and verifies group attestations.
    """
    def __init__(self, hs):
        self.keyring = hs.get_keyring()
        self.clock = hs.get_clock()
        self.server_name = hs.hostname
        self.signing_key = hs.config.signing_key[0]

    @defer.inlineCallbacks
    def verify_attestation(self, attestation, group_id, user_id, server_name=None):
        """Verifies that the given attestation matches the given parameters.

        An optional server_name can be supplied to explicitly set which server's
        signature is expected. Otherwise assumes that either the group_id or user_id
        is local and uses the other's server as the one to check.
        """

        if not server_name:
            if get_domain_from_id(group_id) == self.server_name:
                server_name = get_domain_from_id(user_id)
            elif get_domain_from_id(user_id) == self.server_name:
                server_name = get_domain_from_id(group_id)
            else:
                raise Exception("Expected either group_id or user_id to be local")

        if user_id != attestation["user_id"]:
            raise SynapseError(400, "Attestation has incorrect user_id")

        if group_id != attestation["group_id"]:
            raise SynapseError(400, "Attestation has incorrect group_id")
        valid_until_ms = attestation["valid_until_ms"]

        # TODO: We also want to check that *new* attestations that people give
        # us to store are valid for at least a little while.
        if valid_until_ms < self.clock.time_msec():
            raise SynapseError(400, "Attestation expired")

        yield self.keyring.verify_json_for_server(server_name, attestation)

    def create_attestation(self, group_id, user_id):
        """Create an attestation for the group_id and user_id with default
        validity length.
        """
        validity_period = DEFAULT_ATTESTATION_LENGTH_MS
        validity_period *= random.uniform(*DEFAULT_ATTESTATION_JITTER)
        valid_until_ms = int(self.clock.time_msec() + validity_period)

        return sign_json({
            "group_id": group_id,
            "user_id": user_id,
            "valid_until_ms": valid_until_ms,
        }, self.server_name, self.signing_key)


class GroupAttestionRenewer(object):
    """Responsible for sending and receiving attestation updates.
    """

    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.transport_client = hs.get_federation_transport_client()
        self.is_mine_id = hs.is_mine_id
        self.attestations = hs.get_groups_attestation_signing()

        self._renew_attestations_loop = self.clock.looping_call(
            self._renew_attestations, 30 * 60 * 1000,
        )

    @defer.inlineCallbacks
    def on_renew_attestation(self, group_id, user_id, content):
        """When a remote server renews an attestation
        """
        attestation = content["attestation"]

        if not self.is_mine_id(group_id) and not self.is_mine_id(user_id):
            raise SynapseError(400, "Neither user nor group are on this server")

        yield self.attestations.verify_attestation(
            attestation,
            user_id=user_id,
            group_id=group_id,
        )

        yield self.store.update_remote_attestion(group_id, user_id, attestation)

        defer.returnValue({})

    @defer.inlineCallbacks
    def _renew_attestations(self):
        """Called periodically to check if we need to update any of our attestations
        """

        now = self.clock.time_msec()

        rows = yield self.store.get_attestations_need_renewals(
            now + UPDATE_ATTESTATION_TIME_MS
        )

        @defer.inlineCallbacks
        def _renew_attestation(group_id, user_id):
            if not self.is_mine_id(group_id):
                destination = get_domain_from_id(group_id)
            elif not self.is_mine_id(user_id):
                destination = get_domain_from_id(user_id)
            else:
                logger.warn(
                    "Incorrectly trying to do attestations for user: %r in %r",
                    user_id, group_id,
                )
                yield self.store.remove_attestation_renewal(group_id, user_id)
                return

            attestation = self.attestations.create_attestation(group_id, user_id)

            yield self.transport_client.renew_group_attestation(
                destination, group_id, user_id,
                content={"attestation": attestation},
            )

            yield self.store.update_attestation_renewal(
                group_id, user_id, attestation
            )

        for row in rows:
            group_id = row["group_id"]
            user_id = row["user_id"]

            preserve_fn(_renew_attestation)(group_id, user_id)
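The attestation blob documented at the top of attestations.py can be reproduced end to end with the signedjson calls the module already uses. A minimal sketch under stated assumptions: the key version "a_test" and the server name "a.example.com" are illustrative only, and a real homeserver signs with its configured hs.config.signing_key[0] as create_attestation() does, rather than generating a fresh key:

from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import sign_json, verify_signed_json

# A throwaway ed25519 signing key with version "a_test".
signing_key = generate_signing_key("a_test")

attestation = sign_json(
    {
        "user_id": "@foo:a.example.com",
        "group_id": "+bar:b.example.com",
        "valid_until_ms": 1507994728530,
    },
    "a.example.com",  # the server name the signature is filed under
    signing_key,
)

# The blob now carries signatures["a.example.com"]["ed25519:a_test"],
# matching the shape in the module docstring; verify_signed_json raises
# SignatureVerifyException if the signature does not check out.
verify_signed_json(attestation, "a.example.com", get_verify_key(signing_key))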
864  synapse/groups/groups_server.py    Normal file
@@ -0,0 +1,864 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2017 Vector Creations Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
|
||||
from twisted.internet import defer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO: Allow users to "knock" or simpkly join depending on rules
|
||||
# TODO: Federation admin APIs
|
||||
# TODO: is_priveged flag to users and is_public to users and rooms
|
||||
# TODO: Audit log for admins (profile updates, membership changes, users who tried
|
||||
# to join but were rejected, etc)
|
||||
# TODO: Flairs
|
||||
|
||||
|
||||
class GroupsServerHandler(object):
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.store = hs.get_datastore()
|
||||
self.room_list_handler = hs.get_room_list_handler()
|
||||
self.auth = hs.get_auth()
|
||||
self.clock = hs.get_clock()
|
||||
self.keyring = hs.get_keyring()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.signing_key = hs.config.signing_key[0]
|
||||
self.server_name = hs.hostname
|
||||
self.attestations = hs.get_groups_attestation_signing()
|
||||
self.transport_client = hs.get_federation_transport_client()
|
||||
self.profile_handler = hs.get_profile_handler()
|
||||
|
||||
# Ensure attestations get renewed
|
||||
hs.get_groups_attestation_renewer()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_group_is_ours(self, group_id, requester_user_id,
|
||||
and_exists=False, and_is_admin=None):
|
||||
"""Check that the group is ours, and optionally if it exists.
|
||||
|
||||
If group does exist then return group.
|
||||
|
||||
Args:
|
||||
group_id (str)
|
||||
and_exists (bool): whether to also check if group exists
|
||||
and_is_admin (str): whether to also check if given str is a user_id
|
||||
that is an admin
|
||||
"""
|
||||
if not self.is_mine_id(group_id):
|
||||
raise SynapseError(400, "Group not on this server")
|
||||
|
||||
group = yield self.store.get_group(group_id)
|
||||
if and_exists and not group:
|
||||
raise SynapseError(404, "Unknown group")
|
||||
|
||||
is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)
|
||||
if group and not is_user_in_group and not group["is_public"]:
|
||||
raise SynapseError(404, "Unknown group")
|
||||
|
||||
if and_is_admin:
|
||||
is_admin = yield self.store.is_user_admin_in_group(group_id, and_is_admin)
|
||||
if not is_admin:
|
||||
raise SynapseError(403, "User is not admin in group")
|
||||
|
||||
defer.returnValue(group)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_group_summary(self, group_id, requester_user_id):
|
||||
"""Get the summary for a group as seen by requester_user_id.
|
||||
|
||||
The group summary consists of the profile of the room, and a curated
|
||||
list of users and rooms. These list *may* be organised by role/category.
|
||||
The roles/categories are ordered, and so are the users/rooms within them.
|
||||
|
||||
A user/room may appear in multiple roles/categories.
|
||||
"""
|
||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)
|
||||
|
||||
profile = yield self.get_group_profile(group_id, requester_user_id)
|
||||
|
||||
users, roles = yield self.store.get_users_for_summary_by_role(
|
||||
group_id, include_private=is_user_in_group,
|
||||
)
|
||||
|
||||
# TODO: Add profiles to users
|
||||
|
||||
rooms, categories = yield self.store.get_rooms_for_summary_by_category(
|
||||
group_id, include_private=is_user_in_group,
|
||||
)
|
||||
|
||||
for room_entry in rooms:
|
||||
room_id = room_entry["room_id"]
|
||||
joined_users = yield self.store.get_users_in_room(room_id)
|
||||
entry = yield self.room_list_handler.generate_room_entry(
|
||||
room_id, len(joined_users),
|
||||
with_alias=False, allow_private=True,
|
||||
)
|
||||
entry = dict(entry) # so we don't change whats cached
|
||||
entry.pop("room_id", None)
|
||||
|
||||
room_entry["profile"] = entry
|
||||
|
||||
rooms.sort(key=lambda e: e.get("order", 0))
|
||||
|
||||
for entry in users:
|
||||
user_id = entry["user_id"]
|
||||
|
||||
if not self.is_mine_id(requester_user_id):
|
||||
attestation = yield self.store.get_remote_attestation(group_id, user_id)
|
||||
if not attestation:
|
||||
continue
|
||||
|
||||
entry["attestation"] = attestation
|
||||
else:
|
||||
entry["attestation"] = self.attestations.create_attestation(
|
||||
group_id, user_id,
|
||||
)
|
||||
|
||||
user_profile = yield self.profile_handler.get_profile_from_cache(user_id)
|
||||
entry.update(user_profile)
|
||||
|
||||
users.sort(key=lambda e: e.get("order", 0))
|
||||
|
||||
membership_info = yield self.store.get_users_membership_info_in_group(
|
||||
group_id, requester_user_id,
|
||||
)
|
||||
|
||||
defer.returnValue({
|
||||
"profile": profile,
|
||||
"users_section": {
|
||||
"users": users,
|
||||
"roles": roles,
|
||||
"total_user_count_estimate": 0, # TODO
|
||||
},
|
||||
"rooms_section": {
|
||||
"rooms": rooms,
|
||||
"categories": categories,
|
||||
"total_room_count_estimate": 0, # TODO
|
||||
},
|
||||
"user": membership_info,
|
||||
})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def update_group_summary_room(self, group_id, requester_user_id,
|
||||
room_id, category_id, content):
|
||||
"""Add/update a room to the group summary
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id,
|
||||
requester_user_id,
|
||||
and_exists=True,
|
||||
and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
RoomID.from_string(room_id) # Ensure valid room id
|
||||
|
||||
order = content.get("order", None)
|
||||
|
||||
is_public = _parse_visibility_from_contents(content)
|
||||
|
||||
yield self.store.add_room_to_summary(
|
||||
group_id=group_id,
|
||||
room_id=room_id,
|
||||
category_id=category_id,
|
||||
order=order,
|
||||
is_public=is_public,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_group_summary_room(self, group_id, requester_user_id,
|
||||
room_id, category_id):
|
||||
"""Remove a room from the summary
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id,
|
||||
requester_user_id,
|
||||
and_exists=True,
|
||||
and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
yield self.store.remove_room_from_summary(
|
||||
group_id=group_id,
|
||||
room_id=room_id,
|
||||
category_id=category_id,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_group_categories(self, group_id, requester_user_id):
|
||||
"""Get all categories in a group (as seen by user)
|
||||
"""
|
||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
categories = yield self.store.get_group_categories(
|
||||
group_id=group_id,
|
||||
)
|
||||
defer.returnValue({"categories": categories})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_group_category(self, group_id, requester_user_id, category_id):
|
||||
"""Get a specific category in a group (as seen by user)
|
||||
"""
|
||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
res = yield self.store.get_group_category(
|
||||
group_id=group_id,
|
||||
category_id=category_id,
|
||||
)
|
||||
|
||||
defer.returnValue(res)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def update_group_category(self, group_id, requester_user_id, category_id, content):
|
||||
"""Add/Update a group category
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id,
|
||||
requester_user_id,
|
||||
and_exists=True,
|
||||
and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
is_public = _parse_visibility_from_contents(content)
|
||||
profile = content.get("profile")
|
||||
|
||||
yield self.store.upsert_group_category(
|
||||
group_id=group_id,
|
||||
category_id=category_id,
|
||||
is_public=is_public,
|
||||
profile=profile,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_group_category(self, group_id, requester_user_id, category_id):
|
||||
"""Delete a group category
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id,
|
||||
requester_user_id,
|
||||
and_exists=True,
|
||||
and_is_admin=requester_user_id
|
||||
)
|
||||
|
||||
yield self.store.remove_group_category(
|
||||
group_id=group_id,
|
||||
category_id=category_id,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_group_roles(self, group_id, requester_user_id):
|
||||
"""Get all roles in a group (as seen by user)
|
||||
"""
|
||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
roles = yield self.store.get_group_roles(
|
||||
group_id=group_id,
|
||||
)
|
||||
defer.returnValue({"roles": roles})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_group_role(self, group_id, requester_user_id, role_id):
|
||||
"""Get a specific role in a group (as seen by user)
|
||||
"""
|
||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
res = yield self.store.get_group_role(
|
||||
group_id=group_id,
|
||||
role_id=role_id,
|
||||
)
|
||||
defer.returnValue(res)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def update_group_role(self, group_id, requester_user_id, role_id, content):
|
||||
"""Add/update a role in a group
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id,
|
||||
requester_user_id,
|
||||
and_exists=True,
|
||||
and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
is_public = _parse_visibility_from_contents(content)
|
||||
|
||||
profile = content.get("profile")
|
||||
|
||||
yield self.store.upsert_group_role(
|
||||
group_id=group_id,
|
||||
role_id=role_id,
|
||||
is_public=is_public,
|
||||
profile=profile,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_group_role(self, group_id, requester_user_id, role_id):
|
||||
"""Remove role from group
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id,
|
||||
requester_user_id,
|
||||
and_exists=True,
|
||||
and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
yield self.store.remove_group_role(
|
||||
group_id=group_id,
|
||||
role_id=role_id,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def update_group_summary_user(self, group_id, requester_user_id, user_id, role_id,
|
||||
content):
|
||||
"""Add/update a users entry in the group summary
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
order = content.get("order", None)
|
||||
|
||||
is_public = _parse_visibility_from_contents(content)
|
||||
|
||||
yield self.store.add_user_to_summary(
|
||||
group_id=group_id,
|
||||
user_id=user_id,
|
||||
role_id=role_id,
|
||||
order=order,
|
||||
is_public=is_public,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
|
||||
"""Remove a user from the group summary
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
yield self.store.remove_user_from_summary(
|
||||
group_id=group_id,
|
||||
user_id=user_id,
|
||||
role_id=role_id,
|
||||
)
|
||||
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_group_profile(self, group_id, requester_user_id):
|
||||
"""Get the group profile as seen by requester_user_id
|
||||
"""
|
||||
|
||||
yield self.check_group_is_ours(group_id, requester_user_id)
|
||||
|
||||
group_description = yield self.store.get_group(group_id)
|
||||
|
||||
if group_description:
|
||||
defer.returnValue(group_description)
|
||||
else:
|
||||
raise SynapseError(404, "Unknown group")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def update_group_profile(self, group_id, requester_user_id, content):
|
||||
"""Update the group profile
|
||||
"""
|
||||
yield self.check_group_is_ours(
|
||||
group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id,
|
||||
)
|
||||
|
||||
profile = {}
|
||||
for keyname in ("name", "avatar_url", "short_description",
|
||||
"long_description"):
|
||||
if keyname in content:
|
||||
value = content[keyname]
|
||||
if not isinstance(value, basestring):
|
||||
raise SynapseError(400, "%r value is not a string" % (keyname,))
|
||||
profile[keyname] = value
|
||||
|
||||
yield self.store.update_group_profile(group_id, profile)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_users_in_group(self, group_id, requester_user_id):
|
||||
"""Get the users in group as seen by requester_user_id.
|
||||
|
||||
The ordering is arbitrary at the moment
|
||||
"""
|
||||
|
||||
yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
|
||||
|
||||
is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)
|
||||
|
||||
user_results = yield self.store.get_users_in_group(
|
||||
group_id, include_private=is_user_in_group,
|
||||
)
|
||||
|
||||
chunk = []
|
||||
for user_result in user_results:
|
||||
g_user_id = user_result["user_id"]
|
||||
is_public = user_result["is_public"]
|
||||
is_privileged = user_result["is_admin"]
|
||||
|
||||
entry = {"user_id": g_user_id}
|
||||
|
||||
profile = yield self.profile_handler.get_profile_from_cache(g_user_id)
|
||||
entry.update(profile)
|
||||
|
||||
entry["is_public"] = bool(is_public)
|
||||
entry["is_privileged"] = bool(is_privileged)
|
||||
|
||||
if not self.is_mine_id(g_user_id):
|
||||
attestation = yield self.store.get_remote_attestation(group_id, g_user_id)
|
||||
if not attestation:
|
||||
continue
|
||||
|
||||
entry["attestation"] = attestation
|
||||
else:
|
||||
entry["attestation"] = self.attestations.create_attestation(
|
||||
group_id, g_user_id,
|
||||
)
|
||||
|
||||
chunk.append(entry)
|
||||
|
||||
# TODO: If admin add lists of users whose attestations have timed out
|
||||
|
||||
defer.returnValue({
|
||||
"chunk": chunk,
|
||||
"total_user_count_estimate": len(user_results),
|
||||
})

    @defer.inlineCallbacks
    def get_invited_users_in_group(self, group_id, requester_user_id):
        """Get the users that have been invited to a group as seen by requester_user_id.

        The ordering is arbitrary at the moment
        """

        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

        is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)

        if not is_user_in_group:
            raise SynapseError(403, "User not in group")

        invited_users = yield self.store.get_invited_users_in_group(group_id)

        user_profiles = []

        for user_id in invited_users:
            user_profile = {
                "user_id": user_id
            }
            try:
                profile = yield self.profile_handler.get_profile_from_cache(user_id)
                user_profile.update(profile)
            except Exception as e:
                logger.warn("Error getting profile for %s: %s", user_id, e)
            user_profiles.append(user_profile)

        defer.returnValue({
            "chunk": user_profiles,
            "total_user_count_estimate": len(invited_users),
        })

    @defer.inlineCallbacks
    def get_rooms_in_group(self, group_id, requester_user_id):
        """Get the rooms in group as seen by requester_user_id

        This returns rooms in order of decreasing number of joined users
        """

        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

        is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)

        room_results = yield self.store.get_rooms_in_group(
            group_id, include_private=is_user_in_group,
        )

        chunk = []
        for room_result in room_results:
            room_id = room_result["room_id"]

            joined_users = yield self.store.get_users_in_room(room_id)
            entry = yield self.room_list_handler.generate_room_entry(
                room_id, len(joined_users),
                with_alias=False, allow_private=True,
            )

            if not entry:
                continue

            entry["is_public"] = bool(room_result["is_public"])

            chunk.append(entry)

        chunk.sort(key=lambda e: -e["num_joined_members"])

        defer.returnValue({
            "chunk": chunk,
            "total_room_count_estimate": len(room_results),
        })

    @defer.inlineCallbacks
    def add_room_to_group(self, group_id, requester_user_id, room_id, content):
        """Add room to group
        """
        RoomID.from_string(room_id)  # Ensure valid room id

        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        is_public = _parse_visibility_from_contents(content)

        yield self.store.add_room_to_group(group_id, room_id, is_public=is_public)

        defer.returnValue({})

    @defer.inlineCallbacks
    def update_room_in_group(self, group_id, requester_user_id, room_id, config_key,
                             content):
        """Update room in group
        """
        RoomID.from_string(room_id)  # Ensure valid room id

        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        if config_key == "m.visibility":
            is_public = _parse_visibility_dict(content)

            yield self.store.update_room_in_group_visibility(
                group_id, room_id,
                is_public=is_public,
            )
        else:
            raise SynapseError(400, "Unknown config option")

        defer.returnValue({})
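
    # Illustrative request body for the single supported config key above; the
    # endpoint path is indicative only. "type" must be "public" or "private",
    # anything else yields the 400 error.
    #
    #     PUT .../groups/{group_id}/.../rooms/{room_id}/config/m.visibility
    #     {"type": "private"}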

    @defer.inlineCallbacks
    def remove_room_from_group(self, group_id, requester_user_id, room_id):
        """Remove room from group
        """
        yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        yield self.store.remove_room_from_group(group_id, room_id)

        defer.returnValue({})

    @defer.inlineCallbacks
    def invite_to_group(self, group_id, user_id, requester_user_id, content):
        """Invite user to group
        """

        group = yield self.check_group_is_ours(
            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
        )

        # TODO: Check if user knocked
        # TODO: Check if user is already invited

        content = {
            "profile": {
                "name": group["name"],
                "avatar_url": group["avatar_url"],
            },
            "inviter": requester_user_id,
        }

        if self.hs.is_mine_id(user_id):
            groups_local = self.hs.get_groups_local_handler()
            res = yield groups_local.on_invite(group_id, user_id, content)
            local_attestation = None
        else:
            local_attestation = self.attestations.create_attestation(group_id, user_id)
            content.update({
                "attestation": local_attestation,
            })

            res = yield self.transport_client.invite_to_group_notification(
                get_domain_from_id(user_id), group_id, user_id, content
            )

            user_profile = res.get("user_profile", {})
            yield self.store.add_remote_profile_cache(
                user_id,
                displayname=user_profile.get("displayname"),
                avatar_url=user_profile.get("avatar_url"),
            )

        if res["state"] == "join":
            if not self.hs.is_mine_id(user_id):
                remote_attestation = res["attestation"]

                yield self.attestations.verify_attestation(
                    remote_attestation,
                    user_id=user_id,
                    group_id=group_id,
                )
            else:
                remote_attestation = None

            yield self.store.add_user_to_group(
                group_id, user_id,
                is_admin=False,
                is_public=False,  # TODO
                local_attestation=local_attestation,
                remote_attestation=remote_attestation,
            )
        elif res["state"] == "invite":
            yield self.store.add_group_invite(
                group_id, user_id,
            )
            defer.returnValue({
                "state": "invite"
            })
        elif res["state"] == "reject":
            defer.returnValue({
                "state": "reject"
            })
        else:
            raise SynapseError(502, "Unknown state returned by HS")
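
    # For reference, the three reply shapes the invited user's homeserver may
    # return to the invite notification handled above (values illustrative):
    #
    #     {"state": "join", "attestation": {...}}  # user joined immediately
    #     {"state": "invite"}                      # invite is now pending
    #     {"state": "reject"}                      # invite was refused
    #
    # Any other "state" is treated as a broken remote and surfaces as the 502.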

    @defer.inlineCallbacks
    def accept_invite(self, group_id, requester_user_id, content):
        """User tries to accept an invite to the group.

        This is different from them asking to join, and so should error if no
        invite exists (and they're not a member of the group)
        """

        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

        is_invited = yield self.store.is_user_invited_to_local_group(
            group_id, requester_user_id,
        )
        if not is_invited:
            raise SynapseError(403, "User not invited to group")

        if not self.hs.is_mine_id(requester_user_id):
            local_attestation = self.attestations.create_attestation(
                group_id, requester_user_id,
            )
            remote_attestation = content["attestation"]

            yield self.attestations.verify_attestation(
                remote_attestation,
                user_id=requester_user_id,
                group_id=group_id,
            )
        else:
            local_attestation = None
            remote_attestation = None

        is_public = _parse_visibility_from_contents(content)

        yield self.store.add_user_to_group(
            group_id, requester_user_id,
            is_admin=False,
            is_public=is_public,
            local_attestation=local_attestation,
            remote_attestation=remote_attestation,
        )

        defer.returnValue({
            "state": "join",
            "attestation": local_attestation,
        })

    @defer.inlineCallbacks
    def knock(self, group_id, requester_user_id, content):
        """A user requests becoming a member of the group
        """
        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

        raise NotImplementedError()

    @defer.inlineCallbacks
    def accept_knock(self, group_id, requester_user_id, content):
        """Accept a user's knock to the room.

        Errors if the user hasn't knocked, rather than inviting them.
        """

        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

        raise NotImplementedError()

    @defer.inlineCallbacks
    def remove_user_from_group(self, group_id, user_id, requester_user_id, content):
        """Remove a user from the group; either a user is leaving or an admin
        kicked them.
        """

        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

        is_kick = False
        if requester_user_id != user_id:
            is_admin = yield self.store.is_user_admin_in_group(
                group_id, requester_user_id
            )
            if not is_admin:
                raise SynapseError(403, "User is not admin in group")

            is_kick = True

        yield self.store.remove_user_from_group(
            group_id, user_id,
        )

        if is_kick:
            if self.hs.is_mine_id(user_id):
                groups_local = self.hs.get_groups_local_handler()
                yield groups_local.user_removed_from_group(group_id, user_id, {})
            else:
                yield self.transport_client.remove_user_from_group_notification(
                    get_domain_from_id(user_id), group_id, user_id, {}
                )

        if not self.hs.is_mine_id(user_id):
            yield self.store.maybe_delete_remote_profile_cache(user_id)

        defer.returnValue({})

    @defer.inlineCallbacks
    def create_group(self, group_id, requester_user_id, content):
        group = yield self.check_group_is_ours(group_id, requester_user_id)

        logger.info("Attempting to create group with ID: %r", group_id)

        # parsing the id into a GroupID validates it.
        group_id_obj = GroupID.from_string(group_id)

        if group:
            raise SynapseError(400, "Group already exists")

        is_admin = yield self.auth.is_server_admin(UserID.from_string(requester_user_id))
        if not is_admin:
            if not self.hs.config.enable_group_creation:
                raise SynapseError(
                    403, "Only a server admin can create groups on this server",
                )
            localpart = group_id_obj.localpart
            if not localpart.startswith(self.hs.config.group_creation_prefix):
                raise SynapseError(
                    400,
                    "Can only create groups with prefix %r on this server" % (
                        self.hs.config.group_creation_prefix,
                    ),
                )

        profile = content.get("profile", {})
        name = profile.get("name")
        avatar_url = profile.get("avatar_url")
        short_description = profile.get("short_description")
        long_description = profile.get("long_description")
        user_profile = content.get("user_profile", {})

        yield self.store.create_group(
            group_id,
            requester_user_id,
            name=name,
            avatar_url=avatar_url,
            short_description=short_description,
            long_description=long_description,
        )

        if not self.hs.is_mine_id(requester_user_id):
            remote_attestation = content["attestation"]

            yield self.attestations.verify_attestation(
                remote_attestation,
                user_id=requester_user_id,
                group_id=group_id,
            )

            local_attestation = self.attestations.create_attestation(
                group_id,
                requester_user_id,
            )
        else:
            local_attestation = None
            remote_attestation = None

        yield self.store.add_user_to_group(
            group_id, requester_user_id,
            is_admin=True,
            is_public=True,  # TODO
            local_attestation=local_attestation,
            remote_attestation=remote_attestation,
        )

        if not self.hs.is_mine_id(requester_user_id):
            yield self.store.add_remote_profile_cache(
                requester_user_id,
                displayname=user_profile.get("displayname"),
                avatar_url=user_profile.get("avatar_url"),
            )

        defer.returnValue({
            "group_id": group_id,
        })
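
    # A sketch of the `content` dict accepted by create_group; every field is
    # optional and all values here are hypothetical:
    #
    #     {
    #         "profile": {
    #             "name": "Example Community",
    #             "avatar_url": "mxc://example.com/abc123",
    #             "short_description": "A community for examples",
    #             "long_description": "A longer description...",
    #         },
    #         "user_profile": {
    #             "displayname": "Alice",
    #             "avatar_url": "mxc://example.com/def456",
    #         },
    #         # remote requesters must also supply an "attestation"
    #     }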


def _parse_visibility_from_contents(content):
    """Given a content dict for a request, parse out whether the entity should
    be public or not.
    """

    visibility = content.get("m.visibility")
    if visibility:
        return _parse_visibility_dict(visibility)
    else:
        is_public = True

    return is_public


def _parse_visibility_dict(visibility):
    """Given a dict for the "m.visibility" config, return whether the entity
    should be public or not.
    """
    vis_type = visibility.get("type")
    if not vis_type:
        return True

    if vis_type not in ("public", "private"):
        raise SynapseError(
            400, "Synapse only supports 'public'/'private' visibility"
        )
    return vis_type == "public"
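

# Quick usage sketch of the two helpers above (behaviour follows directly from
# the code; the inputs are hypothetical): an absent or untyped "m.visibility"
# defaults to public, and only "public"/"private" are accepted.
#
#     _parse_visibility_from_contents({})                     # -> True
#     _parse_visibility_from_contents(
#         {"m.visibility": {"type": "private"}})              # -> False
#     _parse_visibility_dict({"type": "public"})              # -> True
#     _parse_visibility_dict({"type": "banana"})              # raises SynapseError(400)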

@@ -17,10 +17,8 @@ from .register import RegistrationHandler
 from .room import (
     RoomCreationHandler, RoomContextHandler,
 )
-from .room_member import RoomMemberHandler
 from .message import MessageHandler
-from .federation import FederationHandler
 from .profile import ProfileHandler
 from .directory import DirectoryHandler
 from .admin import AdminHandler
 from .identity import IdentityHandler

@@ -50,9 +48,7 @@ class Handlers(object):
         self.registration_handler = RegistrationHandler(hs)
         self.message_handler = MessageHandler(hs)
         self.room_creation_handler = RoomCreationHandler(hs)
-        self.room_member_handler = RoomMemberHandler(hs)
-        self.federation_handler = FederationHandler(hs)
         self.profile_handler = ProfileHandler(hs)
         self.directory_handler = DirectoryHandler(hs)
         self.admin_handler = AdminHandler(hs)
         self.identity_handler = IdentityHandler(hs)

@@ -158,7 +158,7 @@ class BaseHandler(object):
             # homeserver.
             requester = synapse.types.create_requester(
                 target_user, is_guest=True)
-            handler = self.hs.get_handlers().room_member_handler
+            handler = self.hs.get_room_member_handler()
             yield handler.update_membership(
                 requester,
                 target_user,

@@ -15,14 +15,19 @@

 from twisted.internet import defer

+import synapse
 from synapse.api.constants import EventTypes
 from synapse.util.metrics import Measure
-from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
+from synapse.util.logcontext import make_deferred_yieldable, preserve_fn

 import logging

 logger = logging.getLogger(__name__)

+metrics = synapse.metrics.get_metrics_for(__name__)
+
+events_processed_counter = metrics.register_counter("events_processed")
+

 def log_failure(failure):
     logger.error(

@@ -70,11 +75,10 @@ class ApplicationServicesHandler(object):
         with Measure(self.clock, "notify_interested_services"):
             self.is_processing = True
             try:
-                upper_bound = self.current_max
                 limit = 100
                 while True:
                     upper_bound, events = yield self.store.get_new_events_for_appservice(
-                        upper_bound, limit
+                        self.current_max, limit
                     )

                     if not events:

@@ -104,10 +108,9 @@ class ApplicationServicesHandler(object):
                         service, event
                     )

-                    yield self.store.set_appservice_last_pos(upper_bound)
+                    events_processed_counter.inc_by(len(events))

-                    if len(events) < limit:
-                        break
+                    yield self.store.set_appservice_last_pos(upper_bound)
             finally:
                 self.is_processing = False

@@ -163,7 +166,7 @@ class ApplicationServicesHandler(object):
     def query_3pe(self, kind, protocol, fields):
         services = yield self._get_services_for_3pn(protocol)

-        results = yield preserve_context_over_deferred(defer.DeferredList([
+        results = yield make_deferred_yieldable(defer.DeferredList([
             preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields)
             for service in services
         ], consumeErrors=True))

@@ -13,15 +13,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from twisted.internet import defer
+from twisted.internet import defer, threads

 from ._base import BaseHandler
 from synapse.api.constants import LoginType
+from synapse.api.errors import (
+    AuthError, Codes, InteractiveAuthIncompleteError, LoginError, StoreError,
+    SynapseError,
+)
+from synapse.module_api import ModuleApi
 from synapse.types import UserID
-from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError
 from synapse.util.async import run_on_reactor
 from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.logcontext import make_deferred_yieldable

 from twisted.web.client import PartialDownloadError

@@ -46,7 +50,6 @@ class AuthHandler(BaseHandler):
         """
         super(AuthHandler, self).__init__(hs)
         self.checkers = {
-            LoginType.PASSWORD: self._check_password_auth,
            LoginType.RECAPTCHA: self._check_recaptcha,
            LoginType.EMAIL_IDENTITY: self._check_email_identity,
            LoginType.MSISDN: self._check_msisdn,

@@ -63,10 +66,7 @@ class AuthHandler(BaseHandler):
             reset_expiry_on_get=True,
         )

-        account_handler = _AccountHandler(
-            hs, check_user_exists=self.check_user_exists
-        )
-
+        account_handler = ModuleApi(hs, self)
         self.password_providers = [
             module(config=config, account_handler=account_handler)
             for module, config in hs.config.password_providers

@@ -75,39 +75,120 @@ class AuthHandler(BaseHandler):
         logger.info("Extra password_providers: %r", self.password_providers)

         self.hs = hs  # FIXME better possibility to access registrationHandler later?
         self.device_handler = hs.get_device_handler()
         self.macaroon_gen = hs.get_macaroon_generator()
         self._password_enabled = hs.config.password_enabled

+        # we keep this as a list despite the O(N^2) implication so that we can
+        # keep PASSWORD first and avoid confusing clients which pick the first
+        # type in the list. (NB that the spec doesn't require us to do so and
+        # clients which favour types that they don't understand over those that
+        # they do are technically broken)
+        login_types = []
+        if self._password_enabled:
+            login_types.append(LoginType.PASSWORD)
+        for provider in self.password_providers:
+            if hasattr(provider, "get_supported_login_types"):
+                for t in provider.get_supported_login_types().keys():
+                    if t not in login_types:
+                        login_types.append(t)
+        self._supported_login_types = login_types
+
+    @defer.inlineCallbacks
+    def validate_user_via_ui_auth(self, requester, request_body, clientip):
+        """
+        Checks that the user is who they claim to be, via a UI auth.
+
+        This is used for things like device deletion and password reset where
+        the user already has a valid access token, but we want to double-check
+        that it isn't stolen by re-authenticating them.
+
+        Args:
+            requester (Requester): The user, as given by the access token
+
+            request_body (dict): The body of the request sent by the client
+
+            clientip (str): The IP address of the client.
+
+        Returns:
+            defer.Deferred[dict]: the parameters for this request (which may
+                have been given only in a previous call).
+
+        Raises:
+            InteractiveAuthIncompleteError if the client has not yet completed
+                any of the permitted login flows
+
+            AuthError if the client has completed a login flow, and it gives
+                a different user to `requester`
+        """
+
+        # build a list of supported flows
+        flows = [
+            [login_type] for login_type in self._supported_login_types
+        ]
+
+        result, params, _ = yield self.check_auth(
+            flows, request_body, clientip,
+        )
+
+        # find the completed login type
+        for login_type in self._supported_login_types:
+            if login_type not in result:
+                continue
+
+            user_id = result[login_type]
+            break
+        else:
+            # this can't happen
+            raise Exception(
+                "check_auth returned True but no successful login type",
+            )
+
+        # check that the UI auth matched the access token
+        if user_id != requester.user.to_string():
+            raise AuthError(403, "Invalid auth")
+
+        defer.returnValue(params)
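
# Example caller (a sketch only; the servlet name and wiring are hypothetical):
# a REST handler that already has a valid access token re-validates the user
# via UI auth before a sensitive action, then proceeds.
#
#     @defer.inlineCallbacks
#     def on_POST(self, request, device_id):
#         requester = yield self.auth.get_user_by_req(request)
#         body = parse_json_object_from_request(request)
#         yield self.auth_handler.validate_user_via_ui_auth(
#             requester, body, self.hs.get_ip_from_request(request),
#         )
#         yield self.device_handler.delete_device(
#             requester.user.to_string(), device_id,
#         )
#         defer.returnValue((200, {}))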

     @defer.inlineCallbacks
     def check_auth(self, flows, clientdict, clientip):
         """
         Takes a dictionary sent by the client in the login / registration
-        protocol and handles the login flow.
+        protocol and handles the User-Interactive Auth flow.

         As a side effect, this function fills in the 'creds' key on the user's
         session with a map, which maps each auth-type (str) to the relevant
         identity authenticated by that auth-type (mostly str, but for captcha, bool).

+        If no auth flows have been completed successfully, raises an
+        InteractiveAuthIncompleteError. To handle this, you can use
+        synapse.rest.client.v2_alpha._base.interactive_auth_handler as a
+        decorator.
+
         Args:
             flows (list): A list of login flows. Each flow is an ordered list of
                           strings representing auth-types. At least one full
                           flow must be completed in order for auth to be successful.
+
             clientdict: The dictionary from the client root level, not the
                         'auth' key: this method prompts for auth if none is sent.
+
+            clientip (str): The IP address of the client.
+
         Returns:
-            A tuple of (authed, dict, dict, session_id) where authed is true if
-            the client has successfully completed an auth flow. If it is true
-            the first dict contains the authenticated credentials of each stage.
+            defer.Deferred[dict, dict, str]: a deferred tuple of
+                (creds, params, session_id).

-            If authed is false, the first dictionary is the server response to
-            the login request and should be passed back to the client.
+                'creds' contains the authenticated credentials of each stage.

-            In either case, the second dict contains the parameters for this
-            request (which may have been given only in a previous call).
+                'params' contains the parameters for this request (which may
+                    have been given only in a previous call).

-            session_id is the ID of this session, either passed in by the client
-            or assigned by the call to check_auth
+                'session_id' is the ID of this session, either passed in by the
+                    client or assigned by this call

+        Raises:
+            InteractiveAuthIncompleteError if the client has not yet completed
+                all the stages in any of the permitted flows.
         """

         authdict = None

@@ -135,11 +216,8 @@ class AuthHandler(BaseHandler):
             clientdict = session['clientdict']

         if not authdict:
-            defer.returnValue(
-                (
-                    False, self._auth_dict_for_flows(flows, session),
-                    clientdict, session['id']
-                )
+            raise InteractiveAuthIncompleteError(
+                self._auth_dict_for_flows(flows, session),
             )

         if 'creds' not in session:

@@ -150,14 +228,12 @@ class AuthHandler(BaseHandler):
         errordict = {}
         if 'type' in authdict:
             login_type = authdict['type']
-            if login_type not in self.checkers:
-                raise LoginError(400, "", Codes.UNRECOGNIZED)
             try:
-                result = yield self.checkers[login_type](authdict, clientip)
+                result = yield self._check_auth_dict(authdict, clientip)
                 if result:
                     creds[login_type] = result
                     self._save_session(session)
-            except LoginError, e:
+            except LoginError as e:
                 if login_type == LoginType.EMAIL_IDENTITY:
                     # riot used to have a bug where it would request a new
                     # validation token (thus sending a new email) each time it

@@ -166,7 +242,7 @@ class AuthHandler(BaseHandler):
                     #
                     # Grandfather in the old behaviour for now to avoid
                     # breaking old riot deployments.
-                    raise e
+                    raise

         # this step failed. Merge the error dict into the response
         # so that the client can have another go.

@@ -183,12 +259,14 @@ class AuthHandler(BaseHandler):
                 "Auth completed with creds: %r. Client dict has keys: %r",
                 creds, clientdict.keys()
             )
-            defer.returnValue((True, creds, clientdict, session['id']))
+            defer.returnValue((creds, clientdict, session['id']))

         ret = self._auth_dict_for_flows(flows, session)
         ret['completed'] = creds.keys()
         ret.update(errordict)
-        defer.returnValue((False, ret, clientdict, session['id']))
+        raise InteractiveAuthIncompleteError(
+            ret,
+        )

     @defer.inlineCallbacks
     def add_oob_auth(self, stagetype, authdict, clientip):
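
# Usage sketch for the InteractiveAuthIncompleteError flow described in the
# check_auth docstring above (servlet and field names are hypothetical): the
# interactive_auth_handler decorator converts the exception into the 401
# challenge that clients expect.
#
#     class ChangePasswordServlet(RestServlet):
#         @interactive_auth_handler
#         @defer.inlineCallbacks
#         def on_POST(self, request):
#             requester = yield self.auth.get_user_by_req(request)
#             body = parse_json_object_from_request(request)
#             params = yield self.auth_handler.validate_user_via_ui_auth(
#                 requester, body, self.hs.get_ip_from_request(request),
#             )
#             # ... apply the password change using `params` ...
#             defer.returnValue((200, {}))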

@@ -260,16 +338,37 @@ class AuthHandler(BaseHandler):
         sess = self._get_session_info(session_id)
         return sess.setdefault('serverdict', {}).get(key, default)

-    def _check_password_auth(self, authdict, _):
-        if "user" not in authdict or "password" not in authdict:
-            raise LoginError(400, "", Codes.MISSING_PARAM)
+    @defer.inlineCallbacks
+    def _check_auth_dict(self, authdict, clientip):
+        """Attempt to validate the auth dict provided by a client

-        user_id = authdict["user"]
-        password = authdict["password"]
-        if not user_id.startswith('@'):
-            user_id = UserID.create(user_id, self.hs.hostname).to_string()
+        Args:
+            authdict (object): auth dict provided by the client
+            clientip (str): IP address of the client

-        return self._check_password(user_id, password)
+        Returns:
+            Deferred: result of the stage verification.
+
+        Raises:
+            StoreError if there was a problem accessing the database
+            SynapseError if there was a problem with the request
+            LoginError if there was an authentication problem.
+        """
+        login_type = authdict['type']
+        checker = self.checkers.get(login_type)
+        if checker is not None:
+            res = yield checker(authdict, clientip)
+            defer.returnValue(res)
+
+        # build a v1-login-style dict out of the authdict and fall back to the
+        # v1 code
+        user_id = authdict.get("user")
+
+        if user_id is None:
+            raise SynapseError(400, "", Codes.MISSING_PARAM)
+
+        (canonical_id, callback) = yield self.validate_login(user_id, authdict)
+        defer.returnValue(canonical_id)

     @defer.inlineCallbacks
     def _check_recaptcha(self, authdict, clientip):

@@ -398,26 +497,8 @@ class AuthHandler(BaseHandler):

         return self.sessions[session_id]

-    def validate_password_login(self, user_id, password):
-        """
-        Authenticates the user with their username and password.
-
-        Used only by the v1 login API.
-
-        Args:
-            user_id (str): complete @user:id
-            password (str): Password
-        Returns:
-            defer.Deferred: (str) canonical user id
-        Raises:
-            StoreError if there was a problem accessing the database
-            LoginError if there was an authentication problem.
-        """
-        return self._check_password(user_id, password)
-
     @defer.inlineCallbacks
-    def get_access_token_for_user_id(self, user_id, device_id=None,
-                                     initial_display_name=None):
+    def get_access_token_for_user_id(self, user_id, device_id=None):
         """
         Creates a new access token for the user with the given user ID.

@@ -431,13 +512,10 @@ class AuthHandler(BaseHandler):
             device_id (str|None): the device ID to associate with the tokens.
                 None to leave the tokens unassociated with a device (deprecated:
                 we should always have a device ID)
-            initial_display_name (str): display name to associate with the
-                device if it needs re-registering
         Returns:
             The access token for the user's session.
         Raises:
             StoreError if there was a problem storing the token.
             LoginError if there was an authentication problem.
         """
         logger.info("Logging in user %s on device %s", user_id, device_id)
         access_token = yield self.issue_access_token(user_id, device_id)

@@ -447,9 +525,11 @@ class AuthHandler(BaseHandler):
         # really don't want is active access_tokens without a record of the
         # device, so we double-check it here.
         if device_id is not None:
-            yield self.device_handler.check_device_registered(
-                user_id, device_id, initial_display_name
-            )
+            try:
+                yield self.store.get_device(user_id, device_id)
+            except StoreError:
+                yield self.store.delete_access_token(access_token)
+                raise StoreError(400, "Login raced against device deletion")

         defer.returnValue(access_token)

@@ -501,29 +581,115 @@ class AuthHandler(BaseHandler):
             )
         defer.returnValue(result)

-    @defer.inlineCallbacks
-    def _check_password(self, user_id, password):
-        """Authenticate a user against the LDAP and local databases.
+    def get_supported_login_types(self):
+        """Get the login types supported for the /login API

-        user_id is checked case insensitively against the local database, but
-        will throw if there are multiple inexact matches.
+        By default this is just 'm.login.password' (unless password_enabled is
+        False in the config file), but password auth providers can provide
+        other login types.

+        Returns:
+            Iterable[str]: login types
+        """
+        return self._supported_login_types
+
+    @defer.inlineCallbacks
+    def validate_login(self, username, login_submission):
+        """Authenticates the user for the /login API
+
+        Also used by the user-interactive auth flow to validate
+        m.login.password auth types.
+
         Args:
-            user_id (str): complete @user:id
+            username (str): username supplied by the user
+            login_submission (dict): the whole of the login submission
+                (including 'type' and other relevant fields)
         Returns:
-            (str) the canonical_user_id
+            Deferred[str, func]: canonical user id, and optional callback
+                to be called once the access token and device id are issued
         Raises:
-            LoginError if login fails
+            StoreError if there was a problem accessing the database
+            SynapseError if there was a problem with the request
+            LoginError if there was an authentication problem.
         """
-        for provider in self.password_providers:
-            is_valid = yield provider.check_password(user_id, password)
-            if is_valid:
-                defer.returnValue(user_id)

-        canonical_user_id = yield self._check_local_password(user_id, password)
+        if username.startswith('@'):
+            qualified_user_id = username
+        else:
+            qualified_user_id = UserID(
+                username, self.hs.hostname
+            ).to_string()
+
+        login_type = login_submission.get("type")
+        known_login_type = False
+
+        # special case to check for "password" for the check_password interface
+        # for the auth providers
+        password = login_submission.get("password")
+        if login_type == LoginType.PASSWORD:
+            if not self._password_enabled:
+                raise SynapseError(400, "Password login has been disabled.")
+            if not password:
+                raise SynapseError(400, "Missing parameter: password")
+
+        for provider in self.password_providers:
+            if (hasattr(provider, "check_password")
+                    and login_type == LoginType.PASSWORD):
+                known_login_type = True
+                is_valid = yield provider.check_password(
+                    qualified_user_id, password,
+                )
+                if is_valid:
+                    defer.returnValue((qualified_user_id, None))
+
+            if (not hasattr(provider, "get_supported_login_types")
+                    or not hasattr(provider, "check_auth")):
+                # this password provider doesn't understand custom login types
+                continue
+
+            supported_login_types = provider.get_supported_login_types()
+            if login_type not in supported_login_types:
+                # this password provider doesn't understand this login type
+                continue
+
+            known_login_type = True
+            login_fields = supported_login_types[login_type]
+
+            missing_fields = []
+            login_dict = {}
+            for f in login_fields:
+                if f not in login_submission:
+                    missing_fields.append(f)
+                else:
+                    login_dict[f] = login_submission[f]
+            if missing_fields:
+                raise SynapseError(
+                    400, "Missing parameters for login type %s: %s" % (
+                        login_type,
+                        missing_fields,
+                    ),
+                )
+
+            result = yield provider.check_auth(
+                username, login_type, login_dict,
+            )
+            if result:
+                if isinstance(result, str):
+                    result = (result, None)
+                defer.returnValue(result)
+
+        if login_type == LoginType.PASSWORD:
+            known_login_type = True
+
+            canonical_user_id = yield self._check_local_password(
+                qualified_user_id, password,
+            )

-        if canonical_user_id:
-            defer.returnValue(canonical_user_id)
+            if canonical_user_id:
+                defer.returnValue((canonical_user_id, None))
+
+        if not known_login_type:
+            raise SynapseError(400, "Unknown login type %s" % login_type)

         # unknown username or invalid password. We raise a 403 here, but note
         # that if we're doing user-interactive login, it turns all LoginErrors

@@ -549,7 +715,7 @@ class AuthHandler(BaseHandler):
         if not lookupres:
             defer.returnValue(None)
         (user_id, password_hash) = lookupres
-        result = self.validate_hash(password, password_hash)
+        result = yield self.validate_hash(password, password_hash)
         if not result:
             logger.warn("Failed password login for user %s", user_id)
             defer.returnValue(None)

@@ -573,22 +739,65 @@ class AuthHandler(BaseHandler):
         raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)

     @defer.inlineCallbacks
-    def set_password(self, user_id, newpassword, requester=None):
-        password_hash = self.hash(newpassword)
+    def delete_access_token(self, access_token):
+        """Invalidate a single access token

-        except_access_token_id = requester.access_token_id if requester else None
+        Args:
+            access_token (str): access token to be deleted

-        try:
-            yield self.store.user_set_password_hash(user_id, password_hash)
-        except StoreError as e:
-            if e.code == 404:
-                raise SynapseError(404, "Unknown user", Codes.NOT_FOUND)
-            raise e
-        yield self.store.user_delete_access_tokens(
-            user_id, except_access_token_id
+        Returns:
+            Deferred
+        """
+        user_info = yield self.auth.get_user_by_access_token(access_token)
+        yield self.store.delete_access_token(access_token)
+
+        # see if any of our auth providers want to know about this
+        for provider in self.password_providers:
+            if hasattr(provider, "on_logged_out"):
+                yield provider.on_logged_out(
+                    user_id=str(user_info["user"]),
+                    device_id=user_info["device_id"],
+                    access_token=access_token,
+                )
-        yield self.hs.get_pusherpool().remove_pushers_by_user(
-            user_id, except_access_token_id
+
+        # delete pushers associated with this access token
+        if user_info["token_id"] is not None:
+            yield self.hs.get_pusherpool().remove_pushers_by_access_token(
+                str(user_info["user"]), (user_info["token_id"], )
             )

     @defer.inlineCallbacks
+    def delete_access_tokens_for_user(self, user_id, except_token_id=None,
+                                      device_id=None):
+        """Invalidate access tokens belonging to a user
+
+        Args:
+            user_id (str): ID of user the tokens belong to
+            except_token_id (str|None): access_token ID which should *not* be
+                deleted
+            device_id (str|None): ID of device the tokens are associated with.
+                If None, tokens associated with any device (or no device) will
+                be deleted
+        Returns:
+            Deferred
+        """
+        tokens_and_devices = yield self.store.user_delete_access_tokens(
+            user_id, except_token_id=except_token_id, device_id=device_id,
+        )
+
+        # see if any of our auth providers want to know about this
+        for provider in self.password_providers:
+            if hasattr(provider, "on_logged_out"):
+                for token, token_id, device_id in tokens_and_devices:
+                    yield provider.on_logged_out(
+                        user_id=user_id,
+                        device_id=device_id,
+                        access_token=token,
+                    )
+
+        # delete pushers associated with the access tokens
+        yield self.hs.get_pusherpool().remove_pushers_by_access_token(
+            user_id, (token_id for _, token_id, _ in tokens_and_devices),
+        )

     @defer.inlineCallbacks

@@ -634,11 +843,14 @@ class AuthHandler(BaseHandler):
             password (str): Password to hash.

         Returns:
-            Hashed password (str).
+            Deferred(str): Hashed password.
         """
+        def _do_hash():
+            return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
+                                 bcrypt.gensalt(self.bcrypt_rounds))
+
+        return make_deferred_yieldable(threads.deferToThread(_do_hash))

     def validate_hash(self, password, stored_hash):
         """Validates that self.hash(password) == stored_hash.

@@ -647,13 +859,19 @@ class AuthHandler(BaseHandler):
             stored_hash (str): Expected hash value.

         Returns:
-            Whether self.hash(password) == stored_hash (bool).
+            Deferred(bool): Whether self.hash(password) == stored_hash.
         """
+
+        def _do_validate_hash():
+            return bcrypt.checkpw(
+                password.encode('utf8') + self.hs.config.password_pepper,
+                stored_hash.encode('utf8')
+            )
+
         if stored_hash:
-            return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
-                                 stored_hash.encode('utf8')) == stored_hash
+            return make_deferred_yieldable(threads.deferToThread(_do_validate_hash))
         else:
-            return False
+            return defer.succeed(False)


 class MacaroonGeneartor(object):

@@ -696,30 +914,3 @@ class MacaroonGeneartor(object):
         macaroon.add_first_party_caveat("gen = 1")
         macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
         return macaroon
-
-
-class _AccountHandler(object):
-    """A proxy object that gets passed to password auth providers so they
-    can register new users etc if necessary.
-    """
-    def __init__(self, hs, check_user_exists):
-        self.hs = hs
-
-        self._check_user_exists = check_user_exists
-
-    def check_user_exists(self, user_id):
-        """Check if user exists.
-
-        Returns:
-            Deferred(bool)
-        """
-        return self._check_user_exists(user_id)
-
-    def register(self, localpart):
-        """Registers a new user with given localpart
-
-        Returns:
-            Deferred: a 2-tuple of (user_id, access_token)
-        """
-        reg = self.hs.get_handlers().registration_handler
-        return reg.register(localpart=localpart)

synapse/handlers/deactivate_account.py (new file, 52 lines)
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+
+from ._base import BaseHandler
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class DeactivateAccountHandler(BaseHandler):
+    """Handler which deals with deactivating user accounts."""
+    def __init__(self, hs):
+        super(DeactivateAccountHandler, self).__init__(hs)
+        self._auth_handler = hs.get_auth_handler()
+        self._device_handler = hs.get_device_handler()
+
+    @defer.inlineCallbacks
+    def deactivate_account(self, user_id):
+        """Deactivate a user's account
+
+        Args:
+            user_id (str): ID of user to be deactivated
+
+        Returns:
+            Deferred
+        """
+        # FIXME: Theoretically there is a race here wherein user resets
+        # password using threepid.
+
+        # first delete any devices belonging to the user, which will also
+        # delete corresponding access tokens.
+        yield self._device_handler.delete_all_devices_for_user(user_id)
+        # then delete any remaining access tokens which weren't associated with
+        # a device.
+        yield self._auth_handler.delete_access_tokens_for_user(user_id)
+
+        yield self.store.user_delete_threepids(user_id)
+        yield self.store.user_set_password_hash(user_id, None)
||||
@@ -14,6 +14,7 @@
|
||||
# limitations under the License.
|
||||
from synapse.api import errors
|
||||
from synapse.api.constants import EventTypes
|
||||
from synapse.api.errors import FederationDeniedError
|
||||
from synapse.util import stringutils
|
||||
from synapse.util.async import Linearizer
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
@@ -34,15 +35,17 @@ class DeviceHandler(BaseHandler):
|
||||
|
||||
self.hs = hs
|
||||
self.state = hs.get_state_handler()
|
||||
self._auth_handler = hs.get_auth_handler()
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
self.federation = hs.get_replication_layer()
|
||||
|
||||
self._edu_updater = DeviceListEduUpdater(hs, self)
|
||||
|
||||
self.federation.register_edu_handler(
|
||||
federation_registry = hs.get_federation_registry()
|
||||
|
||||
federation_registry.register_edu_handler(
|
||||
"m.device_list_update", self._edu_updater.incoming_device_list_update,
|
||||
)
|
||||
self.federation.register_query_handler(
|
||||
federation_registry.register_query_handler(
|
||||
"user_devices", self.on_federation_query_user_devices,
|
||||
)
|
||||
|
||||
@@ -159,9 +162,8 @@ class DeviceHandler(BaseHandler):
|
||||
else:
|
||||
raise
|
||||
|
||||
yield self.store.user_delete_access_tokens(
|
||||
yield self._auth_handler.delete_access_tokens_for_user(
|
||||
user_id, device_id=device_id,
|
||||
delete_refresh_tokens=True,
|
||||
)
|
||||
|
||||
yield self.store.delete_e2e_keys_by_device(
|
||||
@@ -170,13 +172,31 @@ class DeviceHandler(BaseHandler):
|
||||
|
||||
yield self.notify_device_update(user_id, [device_id])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_all_devices_for_user(self, user_id, except_device_id=None):
|
||||
"""Delete all of the user's devices
|
||||
|
||||
Args:
|
||||
user_id (str):
|
||||
except_device_id (str|None): optional device id which should not
|
||||
be deleted
|
||||
|
||||
Returns:
|
||||
defer.Deferred:
|
||||
"""
|
||||
device_map = yield self.store.get_devices_by_user(user_id)
|
||||
device_ids = device_map.keys()
|
||||
if except_device_id is not None:
|
||||
device_ids = [d for d in device_ids if d != except_device_id]
|
||||
yield self.delete_devices(user_id, device_ids)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_devices(self, user_id, device_ids):
|
||||
""" Delete several devices
|
||||
|
||||
Args:
|
||||
user_id (str):
|
||||
device_ids (str): The list of device IDs to delete
|
||||
device_ids (List[str]): The list of device IDs to delete
|
||||
|
||||
Returns:
|
||||
defer.Deferred:
|
||||
@@ -194,9 +214,8 @@ class DeviceHandler(BaseHandler):
|
||||
# Delete access tokens and e2e keys for each device. Not optimised as it is not
|
||||
# considered as part of a critical path.
|
||||
for device_id in device_ids:
|
||||
yield self.store.user_delete_access_tokens(
|
||||
yield self._auth_handler.delete_access_tokens_for_user(
|
||||
user_id, device_id=device_id,
|
||||
delete_refresh_tokens=True,
|
||||
)
|
||||
yield self.store.delete_e2e_keys_by_device(
|
||||
user_id=user_id, device_id=device_id
|
||||
@@ -412,7 +431,7 @@ class DeviceListEduUpdater(object):
|
||||
|
||||
def __init__(self, hs, device_handler):
|
||||
self.store = hs.get_datastore()
|
||||
self.federation = hs.get_replication_layer()
|
||||
self.federation = hs.get_federation_client()
|
||||
self.clock = hs.get_clock()
|
||||
self.device_handler = device_handler
|
||||
|
||||
@@ -496,6 +515,9 @@ class DeviceListEduUpdater(object):
|
||||
# This makes it more likely that the device lists will
|
||||
# eventually become consistent.
|
||||
return
|
||||
except FederationDeniedError as e:
|
||||
logger.info(e)
|
||||
return
|
||||
except Exception:
|
||||
# TODO: Remember that we are now out of sync and try again
|
||||
# later
|
||||
|
||||
@@ -17,7 +17,8 @@ import logging
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.types import get_domain_from_id
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.types import get_domain_from_id, UserID
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
|
||||
@@ -33,10 +34,10 @@ class DeviceMessageHandler(object):
|
||||
"""
|
||||
self.store = hs.get_datastore()
|
||||
self.notifier = hs.get_notifier()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.is_mine = hs.is_mine
|
||||
self.federation = hs.get_federation_sender()
|
||||
|
||||
hs.get_replication_layer().register_edu_handler(
|
||||
hs.get_federation_registry().register_edu_handler(
|
||||
"m.direct_to_device", self.on_direct_to_device_edu
|
||||
)
|
||||
|
||||
@@ -52,6 +53,12 @@ class DeviceMessageHandler(object):
|
||||
message_type = content["type"]
|
||||
message_id = content["message_id"]
|
||||
for user_id, by_device in content["messages"].items():
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if not self.is_mine(UserID.from_string(user_id)):
|
||||
logger.warning("Request for keys for non-local user %s",
|
||||
user_id)
|
||||
raise SynapseError(400, "Not a user here")
|
||||
|
||||
messages_by_device = {
|
||||
device_id: {
|
||||
"content": message_content,
|
||||
@@ -77,7 +84,8 @@ class DeviceMessageHandler(object):
|
||||
local_messages = {}
|
||||
remote_messages = {}
|
||||
for user_id, by_device in messages.items():
|
||||
if self.is_mine_id(user_id):
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if self.is_mine(UserID.from_string(user_id)):
|
||||
messages_by_device = {
|
||||
device_id: {
|
||||
"content": message_content,
|
||||
|
||||
@@ -34,12 +34,15 @@ class DirectoryHandler(BaseHandler):
|
||||
|
||||
self.state = hs.get_state_handler()
|
||||
self.appservice_handler = hs.get_application_service_handler()
|
||||
self.event_creation_handler = hs.get_event_creation_handler()
|
||||
|
||||
self.federation = hs.get_replication_layer()
|
||||
self.federation.register_query_handler(
|
||||
self.federation = hs.get_federation_client()
|
||||
hs.get_federation_registry().register_query_handler(
|
||||
"directory", self.on_directory_query
|
||||
)
|
||||
|
||||
self.spam_checker = hs.get_spam_checker()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _create_association(self, room_alias, room_id, servers=None, creator=None):
|
||||
# general association creation for both human users and app services
|
||||
@@ -73,6 +76,11 @@ class DirectoryHandler(BaseHandler):
|
||||
# association creation for human users
|
||||
# TODO(erikj): Do user auth.
|
||||
|
||||
if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
|
||||
raise SynapseError(
|
||||
403, "This user is not permitted to create this alias",
|
||||
)
|
||||
|
||||
can_create = yield self.can_modify_alias(
|
||||
room_alias,
|
||||
user_id=user_id
|
||||
@@ -242,8 +250,7 @@ class DirectoryHandler(BaseHandler):
|
||||
def send_room_alias_update_event(self, requester, user_id, room_id):
|
||||
aliases = yield self.store.get_aliases_for_room(room_id)
|
||||
|
||||
msg_handler = self.hs.get_handlers().message_handler
|
||||
yield msg_handler.create_and_send_nonmember_event(
|
||||
yield self.event_creation_handler.create_and_send_nonmember_event(
|
||||
requester,
|
||||
{
|
||||
"type": EventTypes.Aliases,
|
||||
@@ -265,8 +272,7 @@ class DirectoryHandler(BaseHandler):
|
||||
if not alias_event or alias_event.content.get("alias", "") != alias_str:
|
||||
return
|
||||
|
||||
msg_handler = self.hs.get_handlers().message_handler
|
||||
yield msg_handler.create_and_send_nonmember_event(
|
||||
yield self.event_creation_handler.create_and_send_nonmember_event(
|
||||
requester,
|
||||
{
|
||||
"type": EventTypes.CanonicalAlias,
|
||||
@@ -327,6 +333,14 @@ class DirectoryHandler(BaseHandler):
|
||||
room_id (str)
|
||||
visibility (str): "public" or "private"
|
||||
"""
|
||||
if not self.spam_checker.user_may_publish_room(
|
||||
requester.user.to_string(), room_id
|
||||
):
|
||||
raise AuthError(
|
||||
403,
|
||||
"This user is not permitted to publish rooms to the room list"
|
||||
)
|
||||
|
||||
if requester.is_guest:
|
||||
raise AuthError(403, "Guests cannot edit the published room list")
|
||||
|
||||
|
||||
@@ -13,14 +13,16 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import ujson as json
|
||||
import simplejson as json
|
||||
import logging
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import SynapseError, CodeMessageException
|
||||
from synapse.types import get_domain_from_id
|
||||
from synapse.api.errors import (
|
||||
SynapseError, CodeMessageException, FederationDeniedError,
|
||||
)
|
||||
from synapse.types import get_domain_from_id, UserID
|
||||
from synapse.util.logcontext import preserve_fn, make_deferred_yieldable
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
@@ -30,15 +32,15 @@ logger = logging.getLogger(__name__)
|
||||
class E2eKeysHandler(object):
|
||||
def __init__(self, hs):
|
||||
self.store = hs.get_datastore()
|
||||
self.federation = hs.get_replication_layer()
|
||||
self.federation = hs.get_federation_client()
|
||||
self.device_handler = hs.get_device_handler()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.is_mine = hs.is_mine
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
# doesn't really work as part of the generic query API, because the
|
||||
# query request requires an object POST, but we abuse the
|
||||
# "query handler" interface.
|
||||
self.federation.register_query_handler(
|
||||
hs.get_federation_registry().register_query_handler(
|
||||
"client_keys", self.on_federation_query_client_keys
|
||||
)
|
||||
|
||||
@@ -70,7 +72,8 @@ class E2eKeysHandler(object):
|
||||
remote_queries = {}
|
||||
|
||||
for user_id, device_ids in device_keys_query.items():
|
||||
if self.is_mine_id(user_id):
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if self.is_mine(UserID.from_string(user_id)):
|
||||
local_query[user_id] = device_ids
|
||||
else:
|
||||
remote_queries[user_id] = device_ids
|
||||
@@ -139,6 +142,10 @@ class E2eKeysHandler(object):
|
||||
failures[destination] = {
|
||||
"status": 503, "message": "Not ready for retry",
|
||||
}
|
||||
except FederationDeniedError as e:
|
||||
failures[destination] = {
|
||||
"status": 403, "message": "Federation Denied",
|
||||
}
|
||||
except Exception as e:
|
||||
# include ConnectionRefused and other errors
|
||||
failures[destination] = {
|
||||
@@ -170,7 +177,8 @@ class E2eKeysHandler(object):
|
||||
|
||||
result_dict = {}
|
||||
for user_id, device_ids in query.items():
|
||||
if not self.is_mine_id(user_id):
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if not self.is_mine(UserID.from_string(user_id)):
|
||||
logger.warning("Request for keys for non-local user %s",
|
||||
user_id)
|
||||
raise SynapseError(400, "Not a user here")
|
||||
@@ -213,7 +221,8 @@ class E2eKeysHandler(object):
|
||||
remote_queries = {}
|
||||
|
||||
for user_id, device_keys in query.get("one_time_keys", {}).items():
|
||||
if self.is_mine_id(user_id):
|
||||
# we use UserID.from_string to catch invalid user ids
|
||||
if self.is_mine(UserID.from_string(user_id)):
|
||||
for device_id, algorithm in device_keys.items():
|
||||
local_query.append((user_id, device_id, algorithm))
|
||||
else:
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,7 +15,6 @@
|
||||
# limitations under the License.
|
||||
|
||||
"""Contains handlers for federation events."""
|
||||
import synapse.util.logcontext
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
from signedjson.sign import verify_signed_json
|
||||
from unpaddedbase64 import decode_base64
|
||||
@@ -23,13 +23,11 @@ from ._base import BaseHandler
|
||||
|
||||
from synapse.api.errors import (
|
||||
AuthError, FederationError, StoreError, CodeMessageException, SynapseError,
|
||||
FederationDeniedError,
|
||||
)
|
||||
from synapse.api.constants import EventTypes, Membership, RejectedReason
|
||||
from synapse.events.validator import EventValidator
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.util.logcontext import (
|
||||
preserve_fn, preserve_context_over_deferred
|
||||
)
|
||||
from synapse.util import unwrapFirstError, logcontext
|
||||
from synapse.util.metrics import measure_func
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.util.async import run_on_reactor, Linearizer
|
||||
@@ -70,15 +68,15 @@ class FederationHandler(BaseHandler):
         self.hs = hs
 
         self.store = hs.get_datastore()
-        self.replication_layer = hs.get_replication_layer()
+        self.replication_layer = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
         self.keyring = hs.get_keyring()
         self.action_generator = hs.get_action_generator()
         self.is_mine_id = hs.is_mine_id
         self.pusher_pool = hs.get_pusherpool()
 
-        self.replication_layer.set_handler(self)
         self.spam_checker = hs.get_spam_checker()
+        self.event_creation_handler = hs.get_event_creation_handler()
 
         # When joining a room we need to queue any events for that room up
         self.room_queues = {}
@@ -125,6 +123,28 @@ class FederationHandler(BaseHandler):
             self.room_queues[pdu.room_id].append((pdu, origin))
             return
 
+        # If we're no longer in the room just ditch the event entirely. This
+        # is probably an old server that has come back and thinks we're still
+        # in the room (or we've been rejoined to the room by a state reset).
+        #
+        # If we were never in the room then maybe our database got vaped and
+        # we should check if we *are* in fact in the room. If we are then we
+        # can magically rejoin the room.
+        is_in_room = yield self.auth.check_host_in_room(
+            pdu.room_id,
+            self.server_name
+        )
+        if not is_in_room:
+            was_in_room = yield self.store.was_host_joined(
+                pdu.room_id, self.server_name,
+            )
+            if was_in_room:
+                logger.info(
+                    "Ignoring PDU %s for room %s from %s as we've left the room!",
+                    pdu.event_id, pdu.room_id, origin,
+                )
+                return
+
         state = None
 
         auth_chain = []
@@ -208,7 +228,7 @@ class FederationHandler(BaseHandler):
                 state, auth_chain = yield self.replication_layer.get_state_for_room(
                     origin, pdu.room_id, pdu.event_id,
                 )
-            except:
+            except Exception:
                 logger.exception("Failed to get state for event: %s", pdu.event_id)
 
         yield self._process_received_pdu(
@@ -442,7 +462,7 @@ class FederationHandler(BaseHandler):
         def check_match(id):
             try:
                 return server_name == get_domain_from_id(id)
-            except:
+            except Exception:
                 return False
 
         # Parses mapping `event_id -> (type, state_key) -> state event_id`
@@ -480,7 +500,7 @@ class FederationHandler(BaseHandler):
                 continue
             try:
                 domain = get_domain_from_id(ev.state_key)
-            except:
+            except Exception:
                 continue
 
             if domain != server_name:
@@ -591,9 +611,9 @@ class FederationHandler(BaseHandler):
                 missing_auth - failed_to_fetch
             )
 
-            results = yield preserve_context_over_deferred(defer.gatherResults(
+            results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
                 [
-                    preserve_fn(self.replication_layer.get_pdu)(
+                    logcontext.preserve_fn(self.replication_layer.get_pdu)(
                         [dest],
                         event_id,
                         outlier=True,
@@ -719,7 +739,7 @@ class FederationHandler(BaseHandler):
                     joined_domains[dom] = min(d, old_d)
                 else:
                     joined_domains[dom] = d
-            except:
+            except Exception:
                 pass
 
         return sorted(joined_domains.items(), key=lambda d: d[1])
@@ -763,6 +783,9 @@ class FederationHandler(BaseHandler):
             except NotRetryingDestination as e:
                 logger.info(e.message)
                 continue
+            except FederationDeniedError as e:
+                logger.info(e)
+                continue
             except Exception as e:
                 logger.exception(
                     "Failed to backfill from %s because %s",
@@ -785,10 +808,13 @@ class FederationHandler(BaseHandler):
         event_ids = list(extremities.keys())
 
         logger.debug("calling resolve_state_groups in _maybe_backfill")
-        states = yield preserve_context_over_deferred(defer.gatherResults([
-            preserve_fn(self.state_handler.resolve_state_groups)(room_id, [e])
-            for e in event_ids
-        ]))
+        resolve = logcontext.preserve_fn(
+            self.state_handler.resolve_state_groups_for_events
+        )
+        states = yield logcontext.make_deferred_yieldable(defer.gatherResults(
+            [resolve(room_id, [e]) for e in event_ids],
+            consumeErrors=True,
+        ))
         states = dict(zip(event_ids, [s.state for s in states]))
 
         state_map = yield self.store.get_events(
@@ -917,7 +943,7 @@ class FederationHandler(BaseHandler):
                 room_creator_user_id="",
                 is_public=False
             )
-        except:
+        except Exception:
             # FIXME
             pass
 
@@ -941,9 +967,7 @@ class FederationHandler(BaseHandler):
         # lots of requests for missing prev_events which we do actually
         # have. Hence we fire off the deferred, but don't wait for it.
 
-        synapse.util.logcontext.preserve_fn(self._handle_queued_pdus)(
-            room_queue
-        )
+        logcontext.preserve_fn(self._handle_queued_pdus)(room_queue)
 
         defer.returnValue(True)
 
@@ -983,8 +1007,7 @@ class FederationHandler(BaseHandler):
         })
 
         try:
-            message_handler = self.hs.get_handlers().message_handler
-            event, context = yield message_handler._create_new_client_event(
+            event, context = yield self.event_creation_handler.create_new_client_event(
                 builder=builder,
             )
         except AuthError as e:
@@ -1070,6 +1093,9 @@ class FederationHandler(BaseHandler):
         """
         event = pdu
 
+        if event.state_key is None:
+            raise SynapseError(400, "The invite event did not have a state key")
+
         is_blocked = yield self.store.is_room_blocked(event.room_id)
         if is_blocked:
             raise SynapseError(403, "This room has been blocked on this server")
@@ -1077,6 +1103,13 @@ class FederationHandler(BaseHandler):
         if self.hs.config.block_non_admin_invites:
             raise SynapseError(403, "This server does not accept room invites")
 
+        if not self.spam_checker.user_may_invite(
+            event.sender, event.state_key, event.room_id,
+        ):
+            raise SynapseError(
+                403, "This user is not permitted to send invites to this server/user"
+            )
+
         membership = event.content.get("membership")
         if event.type != EventTypes.Member or membership != Membership.INVITE:
             raise SynapseError(400, "The event was not an m.room.member invite event")
@@ -1085,9 +1118,6 @@ class FederationHandler(BaseHandler):
         if sender_domain != origin:
             raise SynapseError(400, "The invite event was not from the server sending it")
 
-        if event.state_key is None:
-            raise SynapseError(400, "The invite event did not have a state key")
-
         if not self.is_mine_id(event.state_key):
             raise SynapseError(400, "The invite event must be for this server")
 
@@ -1217,8 +1247,7 @@ class FederationHandler(BaseHandler):
             "state_key": user_id,
         })
 
-        message_handler = self.hs.get_handlers().message_handler
-        event, context = yield message_handler._create_new_client_event(
+        event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
         )
 
@@ -1416,6 +1445,7 @@ class FederationHandler(BaseHandler):
             auth_events=auth_events,
         )
 
+        try:
             if not event.internal_metadata.is_outlier() and not backfilled:
                 yield self.action_generator.handle_push_actions_for_event(
                     event, context
@@ -1426,11 +1456,18 @@ class FederationHandler(BaseHandler):
                 context=context,
                 backfilled=backfilled,
             )
+        except:  # noqa: E722, as we reraise the exception this is fine.
+            # Ensure that we actually remove the entries in the push actions
+            # staging area
+            logcontext.preserve_fn(
+                self.store.remove_push_actions_from_staging
+            )(event.event_id)
+            raise
 
         if not backfilled:
             # this intentionally does not yield: we don't care about the result
             # and don't need to wait for it.
-            preserve_fn(self.pusher_pool.on_new_notifications)(
+            logcontext.preserve_fn(self.pusher_pool.on_new_notifications)(
                 event_stream_id, max_stream_id
             )
 
@@ -1443,16 +1480,16 @@ class FederationHandler(BaseHandler):
        a bunch of outliers, but not a chunk of individual events that depend
        on each other for state calculations.
        """
-        contexts = yield preserve_context_over_deferred(defer.gatherResults(
+        contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                preserve_fn(self._prep_event)(
+                logcontext.preserve_fn(self._prep_event)(
                     origin,
                     ev_info["event"],
                     state=ev_info.get("state"),
                     auth_events=ev_info.get("auth_events"),
                 )
                 for ev_info in event_infos
-            ]
+            ], consumeErrors=True,
         ))
 
         yield self.store.persist_events(
@@ -1678,6 +1715,17 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     @log_function
     def do_auth(self, origin, event, context, auth_events):
+        """
+
+        Args:
+            origin (str):
+            event (synapse.events.FrozenEvent):
+            context (synapse.events.snapshot.EventContext):
+            auth_events (dict[(str, str)->str]):
+
+        Returns:
+            defer.Deferred[None]
+        """
         # Check if we have all the auth events.
         current_state = set(e.event_id for e in auth_events.values())
         event_auth_events = set(e_id for e_id, _ in event.auth_events)
@@ -1747,7 +1795,7 @@ class FederationHandler(BaseHandler):
                     [e_id for e_id, _ in event.auth_events]
                 )
                 seen_events = set(have_events.keys())
-            except:
+            except Exception:
                 # FIXME:
                 logger.exception("Failed to get auth chain")
 
@@ -1760,18 +1808,17 @@ class FederationHandler(BaseHandler):
             # Do auth conflict res.
             logger.info("Different auth: %s", different_auth)
 
-            different_events = yield preserve_context_over_deferred(defer.gatherResults(
-                [
-                    preserve_fn(self.store.get_event)(
+            different_events = yield logcontext.make_deferred_yieldable(
+                defer.gatherResults([
+                    logcontext.preserve_fn(self.store.get_event)(
                         d,
                         allow_none=True,
                         allow_rejected=False,
                     )
                     for d in different_auth
                     if d in have_events and not have_events[d]
-                ],
-                consumeErrors=True
-            )).addErrback(unwrapFirstError)
+                ], consumeErrors=True)
+            ).addErrback(unwrapFirstError)
 
             if different_events:
                 local_view = dict(auth_events)
@@ -1790,16 +1837,9 @@ class FederationHandler(BaseHandler):
                 current_state = set(e.event_id for e in auth_events.values())
                 different_auth = event_auth_events - current_state
 
-                context.current_state_ids = dict(context.current_state_ids)
-                context.current_state_ids.update({
-                    k: a.event_id for k, a in auth_events.items()
-                    if k != event_key
-                })
-                context.prev_state_ids = dict(context.prev_state_ids)
-                context.prev_state_ids.update({
-                    k: a.event_id for k, a in auth_events.items()
-                })
-                context.state_group = self.store.get_next_state_group()
+                yield self._update_context_for_auth_events(
+                    event, context, auth_events, event_key,
+                )
 
         if different_auth and not event.internal_metadata.is_outlier():
             logger.info("Different auth after resolution: %s", different_auth)
@@ -1872,23 +1912,16 @@ class FederationHandler(BaseHandler):
             except AuthError:
                 pass
 
-        except:
+        except Exception:
            # FIXME:
            logger.exception("Failed to query auth chain")
 
         # 4. Look at rejects and their proofs.
         # TODO.
 
-        context.current_state_ids = dict(context.current_state_ids)
-        context.current_state_ids.update({
-            k: a.event_id for k, a in auth_events.items()
-            if k != event_key
-        })
-        context.prev_state_ids = dict(context.prev_state_ids)
-        context.prev_state_ids.update({
-            k: a.event_id for k, a in auth_events.items()
-        })
-        context.state_group = self.store.get_next_state_group()
+        yield self._update_context_for_auth_events(
+            event, context, auth_events, event_key,
+        )
 
         try:
             self.auth.check(event, auth_events=auth_events)
@@ -1896,6 +1929,45 @@ class FederationHandler(BaseHandler):
             logger.warn("Failed auth resolution for %r because %s", event, e)
             raise e
 
+    @defer.inlineCallbacks
+    def _update_context_for_auth_events(self, event, context, auth_events,
+                                        event_key):
+        """Update the state_ids in an event context after auth event resolution,
+        storing the changes as a new state group.
+
+        Args:
+            event (Event): The event we're handling the context for
+
+            context (synapse.events.snapshot.EventContext): event context
+                to be updated
+
+            auth_events (dict[(str, str)->str]): Events to update in the event
+                context.
+
+            event_key ((str, str)): (type, state_key) for the current event.
+                this will not be included in the current_state in the context.
+        """
+        state_updates = {
+            k: a.event_id for k, a in auth_events.iteritems()
+            if k != event_key
+        }
+        context.current_state_ids = dict(context.current_state_ids)
+        context.current_state_ids.update(state_updates)
+        if context.delta_ids is not None:
+            context.delta_ids = dict(context.delta_ids)
+            context.delta_ids.update(state_updates)
+        context.prev_state_ids = dict(context.prev_state_ids)
+        context.prev_state_ids.update({
+            k: a.event_id for k, a in auth_events.iteritems()
+        })
+        context.state_group = yield self.store.store_state_group(
+            event.event_id,
+            event.room_id,
+            prev_group=context.prev_group,
+            delta_ids=context.delta_ids,
+            current_state_ids=context.current_state_ids,
+        )
+
     @defer.inlineCallbacks
     def construct_auth_difference(self, local_auth, remote_auth):
         """ Given a local and remote auth chain, find the differences. This
@@ -1939,7 +2011,7 @@ class FederationHandler(BaseHandler):
         def get_next(it, opt=None):
             try:
                 return it.next()
-            except:
+            except Exception:
                 return opt
 
         current_local = get_next(local_iter)
@@ -2064,8 +2136,7 @@ class FederationHandler(BaseHandler):
         if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
             builder = self.event_builder_factory.new(event_dict)
             EventValidator().validate_new(builder)
-            message_handler = self.hs.get_handlers().message_handler
-            event, context = yield message_handler._create_new_client_event(
+            event, context = yield self.event_creation_handler.create_new_client_event(
                 builder=builder
             )
 
@@ -2080,7 +2151,7 @@ class FederationHandler(BaseHandler):
                 raise e
 
             yield self._check_signature(event, context)
-            member_handler = self.hs.get_handlers().room_member_handler
+            member_handler = self.hs.get_room_member_handler()
             yield member_handler.send_membership_event(None, event, context)
         else:
             destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
@@ -2103,8 +2174,7 @@ class FederationHandler(BaseHandler):
         """
         builder = self.event_builder_factory.new(event_dict)
 
-        message_handler = self.hs.get_handlers().message_handler
-        event, context = yield message_handler._create_new_client_event(
+        event, context = yield self.event_creation_handler.create_new_client_event(
             builder=builder,
         )
 
@@ -2125,7 +2195,7 @@ class FederationHandler(BaseHandler):
         # TODO: Make sure the signatures actually are correct.
         event.signatures.update(returned_invite.signatures)
 
-        member_handler = self.hs.get_handlers().room_member_handler
+        member_handler = self.hs.get_room_member_handler()
         yield member_handler.send_membership_event(None, event, context)
 
     @defer.inlineCallbacks
@@ -2154,8 +2224,9 @@ class FederationHandler(BaseHandler):
 
         builder = self.event_builder_factory.new(event_dict)
         EventValidator().validate_new(builder)
-        message_handler = self.hs.get_handlers().message_handler
-        event, context = yield message_handler._create_new_client_event(builder=builder)
+        event, context = yield self.event_creation_handler.create_new_client_event(
+            builder=builder,
+        )
         defer.returnValue((event, context))
 
     @defer.inlineCallbacks

synapse/handlers/groups_local.py (new file, 430 lines)

@@ -0,0 +1,430 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.types import get_domain_from_id

import logging

logger = logging.getLogger(__name__)


def _create_rerouter(func_name):
    """Returns a function that looks at the group id and calls the function
    on federation or the local group server if the group is local
    """
    def f(self, group_id, *args, **kwargs):
        if self.is_mine_id(group_id):
            return getattr(self.groups_server_handler, func_name)(
                group_id, *args, **kwargs
            )
        else:
            destination = get_domain_from_id(group_id)
            return getattr(self.transport_client, func_name)(
                destination, group_id, *args, **kwargs
            )
    return f
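_create_rerouter() builds each proxy method once, at class-definition time, and the class below simply assigns the result to an attribute so it behaves like a normal bound method. A self-contained, runnable toy illustrating the same trick (the classes and the ":local" suffix test are stand-ins, not synapse APIs):

class LocalStore(object):
    def get_thing(self, key):
        return "local:%s" % (key,)


class RemoteStore(object):
    def get_thing(self, destination, key):
        return "remote:%s:%s" % (destination, key)


def _reroute(func_name):
    # Dispatch to the local or remote backend based on the key alone.
    def f(self, key):
        if key.endswith(":local"):
            return getattr(self.local, func_name)(key)
        return getattr(self.remote, func_name)("other.example.com", key)
    return f


class Router(object):
    def __init__(self):
        self.local = LocalStore()
        self.remote = RemoteStore()

    # One line per proxied method, no repeated dispatch logic.
    get_thing = _reroute("get_thing")


print(Router().get_thing("a:local"))   # local:a:local
print(Router().get_thing("b:remote"))  # remote:other.example.com:b:remote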


class GroupsLocalHandler(object):
    def __init__(self, hs):
        self.hs = hs
        self.store = hs.get_datastore()
        self.room_list_handler = hs.get_room_list_handler()
        self.groups_server_handler = hs.get_groups_server_handler()
        self.transport_client = hs.get_federation_transport_client()
        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.keyring = hs.get_keyring()
        self.is_mine_id = hs.is_mine_id
        self.signing_key = hs.config.signing_key[0]
        self.server_name = hs.hostname
        self.notifier = hs.get_notifier()
        self.attestations = hs.get_groups_attestation_signing()

        self.profile_handler = hs.get_profile_handler()

        # Ensure attestations get renewed
        hs.get_groups_attestation_renewer()

    # The following functions merely route the query to the local groups server
    # or federation depending on if the group is local or remote

    get_group_profile = _create_rerouter("get_group_profile")
    update_group_profile = _create_rerouter("update_group_profile")
    get_rooms_in_group = _create_rerouter("get_rooms_in_group")

    get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")

    add_room_to_group = _create_rerouter("add_room_to_group")
    update_room_in_group = _create_rerouter("update_room_in_group")
    remove_room_from_group = _create_rerouter("remove_room_from_group")

    update_group_summary_room = _create_rerouter("update_group_summary_room")
    delete_group_summary_room = _create_rerouter("delete_group_summary_room")

    update_group_category = _create_rerouter("update_group_category")
    delete_group_category = _create_rerouter("delete_group_category")
    get_group_category = _create_rerouter("get_group_category")
    get_group_categories = _create_rerouter("get_group_categories")

    update_group_summary_user = _create_rerouter("update_group_summary_user")
    delete_group_summary_user = _create_rerouter("delete_group_summary_user")

    update_group_role = _create_rerouter("update_group_role")
    delete_group_role = _create_rerouter("delete_group_role")
    get_group_role = _create_rerouter("get_group_role")
    get_group_roles = _create_rerouter("get_group_roles")

    @defer.inlineCallbacks
    def get_group_summary(self, group_id, requester_user_id):
        """Get the group summary for a group.

        If the group is remote we check that the users have valid attestations.
        """
        if self.is_mine_id(group_id):
            res = yield self.groups_server_handler.get_group_summary(
                group_id, requester_user_id
            )
        else:
            res = yield self.transport_client.get_group_summary(
                get_domain_from_id(group_id), group_id, requester_user_id,
            )

            group_server_name = get_domain_from_id(group_id)

            # Loop through the users and validate the attestations.
            chunk = res["users_section"]["users"]
            valid_users = []
            for entry in chunk:
                g_user_id = entry["user_id"]
                attestation = entry.pop("attestation", {})
                try:
                    if get_domain_from_id(g_user_id) != group_server_name:
                        yield self.attestations.verify_attestation(
                            attestation,
                            group_id=group_id,
                            user_id=g_user_id,
                            server_name=get_domain_from_id(g_user_id),
                        )
                    valid_users.append(entry)
                except Exception as e:
                    logger.info("Failed to verify user is in group: %s", e)

            res["users_section"]["users"] = valid_users

            res["users_section"]["users"].sort(key=lambda e: e.get("order", 0))
            res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0))

        # Add `is_publicised` flag to indicate whether the user has publicised their
        # membership of the group on their profile
        result = yield self.store.get_publicised_groups_for_user(requester_user_id)
        is_publicised = group_id in result

        res.setdefault("user", {})["is_publicised"] = is_publicised

        defer.returnValue(res)

    @defer.inlineCallbacks
    def create_group(self, group_id, user_id, content):
        """Create a group
        """

        logger.info("Asking to create group with ID: %r", group_id)

        if self.is_mine_id(group_id):
            res = yield self.groups_server_handler.create_group(
                group_id, user_id, content
            )
            local_attestation = None
            remote_attestation = None
        else:
            local_attestation = self.attestations.create_attestation(group_id, user_id)
            content["attestation"] = local_attestation

            content["user_profile"] = yield self.profile_handler.get_profile(user_id)

            res = yield self.transport_client.create_group(
                get_domain_from_id(group_id), group_id, user_id, content,
            )

            remote_attestation = res["attestation"]
            yield self.attestations.verify_attestation(
                remote_attestation,
                group_id=group_id,
                user_id=user_id,
                server_name=get_domain_from_id(group_id),
            )

        is_publicised = content.get("publicise", False)
        token = yield self.store.register_user_group_membership(
            group_id, user_id,
            membership="join",
            is_admin=True,
            local_attestation=local_attestation,
            remote_attestation=remote_attestation,
            is_publicised=is_publicised,
        )
        self.notifier.on_new_event(
            "groups_key", token, users=[user_id],
        )

        defer.returnValue(res)

    @defer.inlineCallbacks
    def get_users_in_group(self, group_id, requester_user_id):
        """Get users in a group
        """
        if self.is_mine_id(group_id):
            res = yield self.groups_server_handler.get_users_in_group(
                group_id, requester_user_id
            )
            defer.returnValue(res)

        group_server_name = get_domain_from_id(group_id)

        res = yield self.transport_client.get_users_in_group(
            get_domain_from_id(group_id), group_id, requester_user_id,
        )

        chunk = res["chunk"]
        valid_entries = []
        for entry in chunk:
            g_user_id = entry["user_id"]
            attestation = entry.pop("attestation", {})
            try:
                if get_domain_from_id(g_user_id) != group_server_name:
                    yield self.attestations.verify_attestation(
                        attestation,
                        group_id=group_id,
                        user_id=g_user_id,
                        server_name=get_domain_from_id(g_user_id),
                    )
                valid_entries.append(entry)
            except Exception as e:
                logger.info("Failed to verify user is in group: %s", e)

        res["chunk"] = valid_entries

        defer.returnValue(res)

    @defer.inlineCallbacks
    def join_group(self, group_id, user_id, content):
        """Request to join a group
        """
        raise NotImplementedError()  # TODO

    @defer.inlineCallbacks
    def accept_invite(self, group_id, user_id, content):
        """Accept an invite to a group
        """
        if self.is_mine_id(group_id):
            yield self.groups_server_handler.accept_invite(
                group_id, user_id, content
            )
            local_attestation = None
            remote_attestation = None
        else:
            local_attestation = self.attestations.create_attestation(group_id, user_id)
            content["attestation"] = local_attestation

            res = yield self.transport_client.accept_group_invite(
                get_domain_from_id(group_id), group_id, user_id, content,
            )

            remote_attestation = res["attestation"]

            yield self.attestations.verify_attestation(
                remote_attestation,
                group_id=group_id,
                user_id=user_id,
                server_name=get_domain_from_id(group_id),
            )

        # TODO: Check that the group is public and we're being added publicly
        is_publicised = content.get("publicise", False)

        token = yield self.store.register_user_group_membership(
            group_id, user_id,
            membership="join",
            is_admin=False,
            local_attestation=local_attestation,
            remote_attestation=remote_attestation,
            is_publicised=is_publicised,
        )
        self.notifier.on_new_event(
            "groups_key", token, users=[user_id],
        )

        defer.returnValue({})

    @defer.inlineCallbacks
    def invite(self, group_id, user_id, requester_user_id, config):
        """Invite a user to a group
        """
        content = {
            "requester_user_id": requester_user_id,
            "config": config,
        }
        if self.is_mine_id(group_id):
            res = yield self.groups_server_handler.invite_to_group(
                group_id, user_id, requester_user_id, content,
            )
        else:
            res = yield self.transport_client.invite_to_group(
                get_domain_from_id(group_id), group_id, user_id, requester_user_id,
                content,
            )

        defer.returnValue(res)

    @defer.inlineCallbacks
    def on_invite(self, group_id, user_id, content):
        """One of our users was invited to a group
        """
        # TODO: Support auto join and rejection

        if not self.is_mine_id(user_id):
            raise SynapseError(400, "User not on this server")

        local_profile = {}
        if "profile" in content:
            if "name" in content["profile"]:
                local_profile["name"] = content["profile"]["name"]
            if "avatar_url" in content["profile"]:
                local_profile["avatar_url"] = content["profile"]["avatar_url"]

        token = yield self.store.register_user_group_membership(
            group_id, user_id,
            membership="invite",
            content={"profile": local_profile, "inviter": content["inviter"]},
        )
        self.notifier.on_new_event(
            "groups_key", token, users=[user_id],
        )
        try:
            user_profile = yield self.profile_handler.get_profile(user_id)
        except Exception as e:
            logger.warn("No profile for user %s: %s", user_id, e)
            user_profile = {}

        defer.returnValue({"state": "invite", "user_profile": user_profile})

    @defer.inlineCallbacks
    def remove_user_from_group(self, group_id, user_id, requester_user_id, content):
        """Remove a user from a group
        """
        if user_id == requester_user_id:
            token = yield self.store.register_user_group_membership(
                group_id, user_id,
                membership="leave",
            )
            self.notifier.on_new_event(
                "groups_key", token, users=[user_id],
            )

            # TODO: Should probably remember that we tried to leave so that we can
            # retry if the group server is currently down.

        if self.is_mine_id(group_id):
            res = yield self.groups_server_handler.remove_user_from_group(
                group_id, user_id, requester_user_id, content,
            )
        else:
            content["requester_user_id"] = requester_user_id
            res = yield self.transport_client.remove_user_from_group(
                get_domain_from_id(group_id), group_id, requester_user_id,
                user_id, content,
            )

        defer.returnValue(res)

    @defer.inlineCallbacks
    def user_removed_from_group(self, group_id, user_id, content):
        """One of our users was removed/kicked from a group
        """
        # TODO: Check if user in group
        token = yield self.store.register_user_group_membership(
            group_id, user_id,
            membership="leave",
        )
        self.notifier.on_new_event(
            "groups_key", token, users=[user_id],
        )

    @defer.inlineCallbacks
    def get_joined_groups(self, user_id):
        group_ids = yield self.store.get_joined_groups(user_id)
        defer.returnValue({"groups": group_ids})

    @defer.inlineCallbacks
    def get_publicised_groups_for_user(self, user_id):
        if self.hs.is_mine_id(user_id):
            result = yield self.store.get_publicised_groups_for_user(user_id)

            # Check AS associated groups for this user - this depends on the
            # RegExps in the AS registration file (under `users`)
            for app_service in self.store.get_app_services():
                result.extend(app_service.get_groups_for_user(user_id))

            defer.returnValue({"groups": result})
        else:
            bulk_result = yield self.transport_client.bulk_get_publicised_groups(
                get_domain_from_id(user_id), [user_id],
            )
            result = bulk_result.get("users", {}).get(user_id)
            # TODO: Verify attestations
            defer.returnValue({"groups": result})

    @defer.inlineCallbacks
    def bulk_get_publicised_groups(self, user_ids, proxy=True):
        destinations = {}
        local_users = set()

        for user_id in user_ids:
            if self.hs.is_mine_id(user_id):
                local_users.add(user_id)
            else:
                destinations.setdefault(
                    get_domain_from_id(user_id), set()
                ).add(user_id)

        if not proxy and destinations:
            raise SynapseError(400, "Some user_ids are not local")

        results = {}
        failed_results = []
        for destination, dest_user_ids in destinations.iteritems():
            try:
                r = yield self.transport_client.bulk_get_publicised_groups(
                    destination, list(dest_user_ids),
                )
                results.update(r["users"])
            except Exception:
                failed_results.extend(dest_user_ids)

        for uid in local_users:
            results[uid] = yield self.store.get_publicised_groups_for_user(
                uid
            )

            # Check AS associated groups for this user - this depends on the
            # RegExps in the AS registration file (under `users`)
            for app_service in self.store.get_app_services():
                results[uid].extend(app_service.get_groups_for_user(uid))

        defer.returnValue({"users": results})
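bulk_get_publicised_groups() above first buckets the requested user ids by homeserver so that only one federation request is made per remote destination. A self-contained, runnable sketch of that bucketing step (the helper name and example ids are illustrative):

def bucket_users(user_ids, is_mine_id):
    destinations = {}
    local_users = set()
    for user_id in user_ids:
        if is_mine_id(user_id):
            local_users.add(user_id)
        else:
            # The domain is everything after the first colon of a Matrix id.
            domain = user_id.split(":", 1)[1]
            destinations.setdefault(domain, set()).add(user_id)
    return destinations, local_users


dests, local_users = bucket_users(
    ["@a:example.com", "@b:remote.org", "@c:remote.org"],
    is_mine_id=lambda u: u.endswith(":example.com"),
)
print(dests)        # {'remote.org': {'@b:remote.org', '@c:remote.org'}}
print(local_users)  # {'@a:example.com'}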
@@ -27,7 +27,7 @@ from synapse.types import (
 from synapse.util import unwrapFirstError
 from synapse.util.async import concurrently_execute
 from synapse.util.caches.snapshot_cache import SnapshotCache
-from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
+from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -163,7 +163,7 @@ class InitialSyncHandler(BaseHandler):
                     lambda states: states[event.event_id]
                 )
 
-            (messages, token), current_state = yield preserve_context_over_deferred(
+            (messages, token), current_state = yield make_deferred_yieldable(
                 defer.gatherResults(
                     [
                         preserve_fn(self.store.get_recent_events_for_room)(
@@ -214,7 +214,7 @@ class InitialSyncHandler(BaseHandler):
                 })
 
                 d["account_data"] = account_data_events
-            except:
+            except Exception:
                 logger.exception("Failed to get snapshot")
 
         yield concurrently_execute(handle_room, room_list, 10)
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2017 - 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,8 +13,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.events import spamcheck
-from twisted.internet import defer
+from twisted.internet import defer, reactor
+from twisted.python.failure import Failure
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import AuthError, Codes, SynapseError
@@ -24,9 +25,12 @@ from synapse.types import (
     UserID, RoomAlias, RoomStreamToken,
 )
 from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import preserve_fn, run_in_background
 from synapse.util.metrics import measure_func
+from synapse.util.frozenutils import unfreeze
 from synapse.util.stringutils import random_string
 from synapse.visibility import filter_events_for_client
+from synapse.replication.http.send_event import send_event_to_master
 
 from ._base import BaseHandler
 
@@ -34,11 +38,41 @@ from canonicaljson import encode_canonical_json
 
 import logging
 import random
-import ujson
+import simplejson
 
 logger = logging.getLogger(__name__)
 
 
+class PurgeStatus(object):
+    """Object tracking the status of a purge request
+
+    This class contains information on the progress of a purge request, for
+    return by get_purge_status.
+
+    Attributes:
+        status (int): Tracks whether this request has completed. One of
+            STATUS_{ACTIVE,COMPLETE,FAILED}
+    """
+
+    STATUS_ACTIVE = 0
+    STATUS_COMPLETE = 1
+    STATUS_FAILED = 2
+
+    STATUS_TEXT = {
+        STATUS_ACTIVE: "active",
+        STATUS_COMPLETE: "complete",
+        STATUS_FAILED: "failed",
+    }
+
+    def __init__(self):
+        self.status = PurgeStatus.STATUS_ACTIVE
+
+    def asdict(self):
+        return {
+            "status": PurgeStatus.STATUS_TEXT[self.status]
+        }
+
+
 class MessageHandler(BaseHandler):
 
     def __init__(self, hs):
@@ -46,29 +80,89 @@ class MessageHandler(BaseHandler):
         self.hs = hs
         self.state = hs.get_state_handler()
         self.clock = hs.get_clock()
         self.validator = EventValidator()
 
         self.pagination_lock = ReadWriteLock()
+        self._purges_in_progress_by_room = set()
+        # map from purge id to PurgeStatus
+        self._purges_by_id = {}
 
-        self.pusher_pool = hs.get_pusherpool()
-
-        # We arbitrarily limit concurrent event creation for a room to 5.
-        # This is to stop us from diverging history *too* much.
-        self.limiter = Limiter(max_count=5)
-
-        self.action_generator = hs.get_action_generator()
+    def start_purge_history(self, room_id, topological_ordering,
+                            delete_local_events=False):
+        """Start off a history purge on a room.
+
+        Args:
+            room_id (str): The room to purge from
+            topological_ordering (int): minimum topo ordering to preserve
+            delete_local_events (bool): True to delete local events as well as
+                remote ones
+
+        Returns:
+            str: unique ID for this purge transaction.
+        """
+        if room_id in self._purges_in_progress_by_room:
+            raise SynapseError(
+                400,
+                "History purge already in progress for %s" % (room_id, ),
+            )
+
+        purge_id = random_string(16)
+
+        # we log the purge_id here so that it can be tied back to the
+        # request id in the log lines.
+        logger.info("[purge] starting purge_id %s", purge_id)
+
+        self._purges_by_id[purge_id] = PurgeStatus()
+        run_in_background(
+            self._purge_history,
+            purge_id, room_id, topological_ordering, delete_local_events,
+        )
+        return purge_id
 
     @defer.inlineCallbacks
-    def purge_history(self, room_id, event_id):
-        event = yield self.store.get_event(event_id)
-
-        if event.room_id != room_id:
-            raise SynapseError(400, "Event is for wrong room.")
-
-        depth = event.depth
+    def _purge_history(self, purge_id, room_id, topological_ordering,
+                       delete_local_events):
+        """Carry out a history purge on a room.
+
+        Args:
+            purge_id (str): The id for this purge
+            room_id (str): The room to purge from
+            topological_ordering (int): minimum topo ordering to preserve
+            delete_local_events (bool): True to delete local events as well as
+                remote ones
+
+        Returns:
+            Deferred
+        """
+        self._purges_in_progress_by_room.add(room_id)
+        try:
             with (yield self.pagination_lock.write(room_id)):
-                yield self.store.delete_old_state(room_id, depth)
+                yield self.store.purge_history(
+                    room_id, topological_ordering, delete_local_events,
+                )
+            logger.info("[purge] complete")
+            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
+        except Exception:
+            logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
+            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
+        finally:
+            self._purges_in_progress_by_room.discard(room_id)
+
+            # remove the purge from the list 24 hours after it completes
+            def clear_purge():
+                del self._purges_by_id[purge_id]
+            reactor.callLater(24 * 3600, clear_purge)
+
+    def get_purge_status(self, purge_id):
+        """Get the current status of an active purge
+
+        Args:
+            purge_id (str): purge_id returned by start_purge_history
+
+        Returns:
+            PurgeStatus|None
+        """
+        return self._purges_by_id.get(purge_id)
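A hedged usage sketch of the purge API introduced above: start_purge_history() returns a purge_id immediately while _purge_history() runs via run_in_background(), and callers poll get_purge_status() until the status leaves STATUS_ACTIVE. Here `handler` stands in for the MessageHandler instance, and the room id and ordering are made up:

purge_id = handler.start_purge_history(
    "!room:example.com",
    topological_ordering=100,
    delete_local_events=False,
)

# Poll later; returns None once the entry has been expired (24h after it
# completes), otherwise a PurgeStatus.
status = handler.get_purge_status(purge_id)
if status is not None:
    print(status.asdict())  # {"status": "active"} until the purge finishes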
 
     @defer.inlineCallbacks
     def get_messages(self, requester, room_id=None, pagin_config=None,
@@ -178,163 +272,6 @@ class MessageHandler(BaseHandler):
 
         defer.returnValue(chunk)
 
-    @defer.inlineCallbacks
-    def create_event(self, requester, event_dict, token_id=None, txn_id=None,
-                     prev_event_ids=None):
-        """
-        Given a dict from a client, create a new event.
-
-        Creates an FrozenEvent object, filling out auth_events, prev_events,
-        etc.
-
-        Adds display names to Join membership events.
-
-        Args:
-            requester
-            event_dict (dict): An entire event
-            token_id (str)
-            txn_id (str)
-            prev_event_ids (list): The prev event ids to use when creating the event
-
-        Returns:
-            Tuple of created event (FrozenEvent), Context
-        """
-        builder = self.event_builder_factory.new(event_dict)
-
-        with (yield self.limiter.queue(builder.room_id)):
-            self.validator.validate_new(builder)
-
-            if builder.type == EventTypes.Member:
-                membership = builder.content.get("membership", None)
-                target = UserID.from_string(builder.state_key)
-
-                if membership in {Membership.JOIN, Membership.INVITE}:
-                    # If event doesn't include a display name, add one.
-                    profile = self.hs.get_handlers().profile_handler
-                    content = builder.content
-
-                    try:
-                        if "displayname" not in content:
-                            content["displayname"] = yield profile.get_displayname(target)
-                        if "avatar_url" not in content:
-                            content["avatar_url"] = yield profile.get_avatar_url(target)
-                    except Exception as e:
-                        logger.info(
-                            "Failed to get profile information for %r: %s",
-                            target, e
-                        )
-
-            if token_id is not None:
-                builder.internal_metadata.token_id = token_id
-
-            if txn_id is not None:
-                builder.internal_metadata.txn_id = txn_id
-
-            event, context = yield self._create_new_client_event(
-                builder=builder,
-                requester=requester,
-                prev_event_ids=prev_event_ids,
-            )
-
-        defer.returnValue((event, context))
-
-    @defer.inlineCallbacks
-    def send_nonmember_event(self, requester, event, context, ratelimit=True):
-        """
-        Persists and notifies local clients and federation of an event.
-
-        Args:
-            event (FrozenEvent) the event to send.
-            context (Context) the context of the event.
-            ratelimit (bool): Whether to rate limit this send.
-            is_guest (bool): Whether the sender is a guest.
-        """
-        if event.type == EventTypes.Member:
-            raise SynapseError(
-                500,
-                "Tried to send member event through non-member codepath"
-            )
-
-        # We check here if we are currently being rate limited, so that we
-        # don't do unnecessary work. We check again just before we actually
-        # send the event.
-        yield self.ratelimit(requester, update=False)
-
-        user = UserID.from_string(event.sender)
-
-        assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
-
-        if event.is_state():
-            prev_state = yield self.deduplicate_state_event(event, context)
-            if prev_state is not None:
-                defer.returnValue(prev_state)
-
-        yield self.handle_new_client_event(
-            requester=requester,
-            event=event,
-            context=context,
-            ratelimit=ratelimit,
-        )
-
-        if event.type == EventTypes.Message:
-            presence = self.hs.get_presence_handler()
-            # We don't want to block sending messages on any presence code. This
-            # matters as sometimes presence code can take a while.
-            preserve_fn(presence.bump_presence_active_time)(user)
-
-    @defer.inlineCallbacks
-    def deduplicate_state_event(self, event, context):
-        """
-        Checks whether event is in the latest resolved state in context.
-
-        If so, returns the version of the event in context.
-        Otherwise, returns None.
-        """
-        prev_event_id = context.prev_state_ids.get((event.type, event.state_key))
-        prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
-        if not prev_event:
-            return
-
-        if prev_event and event.user_id == prev_event.user_id:
-            prev_content = encode_canonical_json(prev_event.content)
-            next_content = encode_canonical_json(event.content)
-            if prev_content == next_content:
-                defer.returnValue(prev_event)
-        return
-
-    @defer.inlineCallbacks
-    def create_and_send_nonmember_event(
-        self,
-        requester,
-        event_dict,
-        ratelimit=True,
-        txn_id=None
-    ):
-        """
-        Creates an event, then sends it.
-
-        See self.create_event and self.send_nonmember_event.
-        """
-        event, context = yield self.create_event(
-            requester,
-            event_dict,
-            token_id=requester.access_token_id,
-            txn_id=txn_id
-        )
-
-        if spamcheck.check_event_for_spam(event):
-            raise SynapseError(
-                403, "Spam is not permitted here", Codes.FORBIDDEN
-            )
-
-        yield self.send_nonmember_event(
-            requester,
-            event,
-            context,
-            ratelimit=ratelimit,
-        )
-        defer.returnValue(event)
-
     @defer.inlineCallbacks
     def get_room_data(self, user_id=None, room_id=None,
                       event_type=None, state_key="", is_guest=False):
@@ -418,9 +355,234 @@ class MessageHandler(BaseHandler):
             [serialize_event(c, now) for c in room_state.values()]
         )
 
-    @measure_func("_create_new_client_event")
     @defer.inlineCallbacks
-    def _create_new_client_event(self, builder, requester=None, prev_event_ids=None):
+    def get_joined_members(self, requester, room_id):
+        """Get all the joined members in the room and their profile information.
+
+        If the user has left the room return the state events from when they left.
+
+        Args:
+            requester(Requester): The user requesting state events.
+            room_id(str): The room ID to get all state events from.
+        Returns:
+            A dict of user_id to profile info
+        """
+        user_id = requester.user.to_string()
+        if not requester.app_service:
+            # We check AS auth after fetching the room membership, as it
+            # requires us to pull out all joined members anyway.
+            membership, _ = yield self._check_in_room_or_world_readable(
+                room_id, user_id
+            )
+            if membership != Membership.JOIN:
+                raise NotImplementedError(
+                    "Getting joined members after leaving is not implemented"
+                )
+
+        users_with_profile = yield self.state.get_current_user_in_room(room_id)
+
+        # If this is an AS, double check that they are allowed to see the members.
+        # This can either be because the AS user is in the room or because there
+        # is a user in the room that the AS is "interested in"
+        if requester.app_service and user_id not in users_with_profile:
+            for uid in users_with_profile:
+                if requester.app_service.is_interested_in_user(uid):
+                    break
+            else:
+                # Loop fell through, AS has no interested users in room
+                raise AuthError(403, "Appservice not in room")
+
+        defer.returnValue({
+            user_id: {
+                "avatar_url": profile.avatar_url,
+                "display_name": profile.display_name,
+            }
+            for user_id, profile in users_with_profile.iteritems()
+        })
+
+
+class EventCreationHandler(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.state = hs.get_state_handler()
+        self.clock = hs.get_clock()
+        self.validator = EventValidator()
+        self.profile_handler = hs.get_profile_handler()
+        self.event_builder_factory = hs.get_event_builder_factory()
+        self.server_name = hs.hostname
+        self.ratelimiter = hs.get_ratelimiter()
+        self.notifier = hs.get_notifier()
+        self.config = hs.config
+
+        self.http_client = hs.get_simple_http_client()
+
+        # This is only used to get at ratelimit function, and maybe_kick_guest_users
+        self.base_handler = BaseHandler(hs)
+
+        self.pusher_pool = hs.get_pusherpool()
+
+        # We arbitrarily limit concurrent event creation for a room to 5.
+        # This is to stop us from diverging history *too* much.
+        self.limiter = Limiter(max_count=5)
+
+        self.action_generator = hs.get_action_generator()
+
+        self.spam_checker = hs.get_spam_checker()
+
+    @defer.inlineCallbacks
+    def create_event(self, requester, event_dict, token_id=None, txn_id=None,
+                     prev_event_ids=None):
+        """
+        Given a dict from a client, create a new event.
+
+        Creates an FrozenEvent object, filling out auth_events, prev_events,
+        etc.
+
+        Adds display names to Join membership events.
+
+        Args:
+            requester
+            event_dict (dict): An entire event
+            token_id (str)
+            txn_id (str)
+            prev_event_ids (list): The prev event ids to use when creating the event
+
+        Returns:
+            Tuple of created event (FrozenEvent), Context
+        """
+        builder = self.event_builder_factory.new(event_dict)
+
+        with (yield self.limiter.queue(builder.room_id)):
+            self.validator.validate_new(builder)
+
+            if builder.type == EventTypes.Member:
+                membership = builder.content.get("membership", None)
+                target = UserID.from_string(builder.state_key)
+
+                if membership in {Membership.JOIN, Membership.INVITE}:
+                    # If event doesn't include a display name, add one.
+                    profile = self.profile_handler
+                    content = builder.content
+
+                    try:
+                        if "displayname" not in content:
+                            content["displayname"] = yield profile.get_displayname(target)
+                        if "avatar_url" not in content:
+                            content["avatar_url"] = yield profile.get_avatar_url(target)
+                    except Exception as e:
+                        logger.info(
+                            "Failed to get profile information for %r: %s",
+                            target, e
+                        )
+
+            if token_id is not None:
+                builder.internal_metadata.token_id = token_id
+
+            if txn_id is not None:
+                builder.internal_metadata.txn_id = txn_id
+
+            event, context = yield self.create_new_client_event(
+                builder=builder,
+                requester=requester,
+                prev_event_ids=prev_event_ids,
+            )
+
+        defer.returnValue((event, context))
+
+    @defer.inlineCallbacks
+    def send_nonmember_event(self, requester, event, context, ratelimit=True):
+        """
+        Persists and notifies local clients and federation of an event.
+
+        Args:
+            event (FrozenEvent) the event to send.
+            context (Context) the context of the event.
+            ratelimit (bool): Whether to rate limit this send.
+            is_guest (bool): Whether the sender is a guest.
+        """
+        if event.type == EventTypes.Member:
+            raise SynapseError(
+                500,
+                "Tried to send member event through non-member codepath"
+            )
+
+        user = UserID.from_string(event.sender)
+
+        assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
+
+        if event.is_state():
+            prev_state = yield self.deduplicate_state_event(event, context)
+            if prev_state is not None:
+                defer.returnValue(prev_state)
+
+        yield self.handle_new_client_event(
+            requester=requester,
+            event=event,
+            context=context,
+            ratelimit=ratelimit,
+        )
+
+    @defer.inlineCallbacks
+    def deduplicate_state_event(self, event, context):
+        """
+        Checks whether event is in the latest resolved state in context.
+
+        If so, returns the version of the event in context.
+        Otherwise, returns None.
+        """
+        prev_event_id = context.prev_state_ids.get((event.type, event.state_key))
+        prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
+        if not prev_event:
+            return
+
+        if prev_event and event.user_id == prev_event.user_id:
+            prev_content = encode_canonical_json(prev_event.content)
+            next_content = encode_canonical_json(event.content)
+            if prev_content == next_content:
+                defer.returnValue(prev_event)
+        return
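deduplicate_state_event() hinges on canonical JSON: encoding both contents with encode_canonical_json() makes semantically identical dicts compare equal as bytes regardless of key order. A small, runnable illustration (the contents are made-up membership events):

from canonicaljson import encode_canonical_json

prev_content = {"membership": "join", "displayname": "Alice"}
next_content = {"displayname": "Alice", "membership": "join"}

# Key order differs, but the canonical encodings are byte-identical.
assert encode_canonical_json(prev_content) == encode_canonical_json(next_content)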
 
+    @defer.inlineCallbacks
+    def create_and_send_nonmember_event(
+        self,
+        requester,
+        event_dict,
+        ratelimit=True,
+        txn_id=None
+    ):
+        """
+        Creates an event, then sends it.
+
+        See self.create_event and self.send_nonmember_event.
+        """
+        event, context = yield self.create_event(
+            requester,
+            event_dict,
+            token_id=requester.access_token_id,
+            txn_id=txn_id
+        )
+
+        spam_error = self.spam_checker.check_event_for_spam(event)
+        if spam_error:
+            if not isinstance(spam_error, basestring):
+                spam_error = "Spam is not permitted here"
+            raise SynapseError(
+                403, spam_error, Codes.FORBIDDEN
+            )
+
+        yield self.send_nonmember_event(
+            requester,
+            event,
+            context,
+            ratelimit=ratelimit,
+        )
+        defer.returnValue(event)
 
+    @measure_func("create_new_client_event")
+    @defer.inlineCallbacks
+    def create_new_client_event(self, builder, requester=None, prev_event_ids=None):
         if prev_event_ids:
             prev_events = yield self.store.add_event_hashes(prev_event_ids)
             prev_max_depth = yield self.store.get_max_depth_of_events(prev_event_ids)
@@ -457,9 +619,7 @@ class MessageHandler(BaseHandler):
         builder.prev_events = prev_events
         builder.depth = depth
 
-        state_handler = self.state_handler
-
-        context = yield state_handler.compute_event_context(builder)
+        context = yield self.state.compute_event_context(builder)
         if requester:
             context.app_service = requester.app_service
 
@@ -494,12 +654,21 @@ class MessageHandler(BaseHandler):
         event,
         context,
         ratelimit=True,
-        extra_users=[]
+        extra_users=[],
     ):
-        # We now need to go and hit out to wherever we need to hit out to.
-
-        if ratelimit:
-            yield self.ratelimit(requester)
+        """Processes a new event. This includes checking auth, persisting it,
+        notifying users, sending to remote servers, etc.
+
+        If called from a worker will hit out to the master process for final
+        processing.
+
+        Args:
+            requester (Requester)
+            event (FrozenEvent)
+            context (EventContext)
+            ratelimit (bool)
+            extra_users (list(UserID)): Any extra users to notify about event
+        """
 
         try:
             yield self.auth.check_from_context(event, context)
@@ -509,13 +678,64 @@ class MessageHandler(BaseHandler):
 
         # Ensure that we can round trip before trying to persist in db
         try:
-            dump = ujson.dumps(event.content)
-            ujson.loads(dump)
-        except:
+            dump = simplejson.dumps(unfreeze(event.content))
+            simplejson.loads(dump)
+        except Exception:
             logger.exception("Failed to encode content: %r", event.content)
             raise
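A minimal sketch of the round-trip check this hunk switches from ujson to simplejson: serialise the (possibly frozen) content and parse it back before persisting, so non-JSON-safe values fail loudly here rather than deep in the storage layer. The helper name is illustrative; unfreeze() is the synapse.util.frozenutils import added above:

import simplejson

from synapse.util.frozenutils import unfreeze


def check_content_roundtrips(content):
    # Raises if the content cannot survive a JSON encode/decode cycle.
    dump = simplejson.dumps(unfreeze(content))
    simplejson.loads(dump)
    return dump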
 
-        yield self.maybe_kick_guest_users(event, context)
+        yield self.action_generator.handle_push_actions_for_event(
+            event, context
+        )
 
+        try:
+            # If we're a worker we need to hit out to the master.
+            if self.config.worker_app:
+                yield send_event_to_master(
+                    self.http_client,
+                    host=self.config.worker_replication_host,
+                    port=self.config.worker_replication_http_port,
+                    requester=requester,
+                    event=event,
+                    context=context,
+                    ratelimit=ratelimit,
+                    extra_users=extra_users,
+                )
+                return
+
+            yield self.persist_and_notify_client_event(
+                requester,
+                event,
+                context,
+                ratelimit=ratelimit,
+                extra_users=extra_users,
+            )
+        except:  # noqa: E722, as we reraise the exception this is fine.
+            # Ensure that we actually remove the entries in the push actions
+            # staging area, if we calculated them.
+            preserve_fn(self.store.remove_push_actions_from_staging)(event.event_id)
+            raise
+
+    @defer.inlineCallbacks
+    def persist_and_notify_client_event(
+        self,
+        requester,
+        event,
+        context,
+        ratelimit=True,
+        extra_users=[],
+    ):
+        """Called when we have fully built the event, have already
+        calculated the push actions for the event, and checked auth.
+
+        This should only be run on master.
+        """
+        assert not self.config.worker_app
+
+        if ratelimit:
+            yield self.base_handler.ratelimit(requester)
+
+        yield self.base_handler.maybe_kick_guest_users(event, context)
 
         if event.type == EventTypes.CanonicalAlias:
             # Check the alias is actually valid (at this time at least)
@@ -608,10 +828,6 @@ class MessageHandler(BaseHandler):
                 "Changing the room create event is forbidden",
             )
 
-        yield self.action_generator.handle_push_actions_for_event(
-            event, context
-        )
-
         (event_stream_id, max_stream_id) = yield self.store.persist_event(
             event, context=context
         )
@@ -631,3 +847,9 @@ class MessageHandler(BaseHandler):
         )
 
         preserve_fn(_notify)()
+
+        if event.type == EventTypes.Message:
+            presence = self.hs.get_presence_handler()
+            # We don't want to block sending messages on any presence code. This
+            # matters as sometimes presence code can take a while.
+            preserve_fn(presence.bump_presence_active_time)(requester.user)
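The tail of that hunk relies on a fire-and-forget idiom worth calling out. A hedged sketch, assuming the surrounding handler variables (`presence`, `requester`) are in scope as above: preserve_fn() starts the call in its own log context and the returned Deferred is deliberately not yielded, so event sending never blocks on presence bookkeeping.

from synapse.util.logcontext import preserve_fn

# Start the presence update but do not wait for it; any failure is handled
# and logged inside the wrapped call rather than propagated to this caller.
preserve_fn(presence.bump_presence_active_time)(requester.user)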
@@ -93,29 +93,30 @@ class PresenceHandler(object):
         self.store = hs.get_datastore()
         self.wheel_timer = WheelTimer()
         self.notifier = hs.get_notifier()
-        self.replication = hs.get_replication_layer()
         self.federation = hs.get_federation_sender()
 
         self.state = hs.get_state_handler()
 
-        self.replication.register_edu_handler(
+        federation_registry = hs.get_federation_registry()
+
+        federation_registry.register_edu_handler(
             "m.presence", self.incoming_presence
         )
-        self.replication.register_edu_handler(
+        federation_registry.register_edu_handler(
             "m.presence_invite",
             lambda origin, content: self.invite_presence(
                 observed_user=UserID.from_string(content["observed_user"]),
                 observer_user=UserID.from_string(content["observer_user"]),
             )
         )
-        self.replication.register_edu_handler(
+        federation_registry.register_edu_handler(
             "m.presence_accept",
             lambda origin, content: self.accept_presence(
                 observed_user=UserID.from_string(content["observed_user"]),
                 observer_user=UserID.from_string(content["observer_user"]),
             )
         )
-        self.replication.register_edu_handler(
+        federation_registry.register_edu_handler(
             "m.presence_deny",
             lambda origin, content: self.deny_presence(
                 observed_user=UserID.from_string(content["observed_user"]),
@@ -364,7 +365,7 @@ class PresenceHandler(object):
|
||||
)
|
||||
|
||||
preserve_fn(self._update_states)(changes)
|
||||
except:
|
||||
except Exception:
|
||||
logger.exception("Exception in _handle_timeouts loop")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@@ -1199,7 +1200,7 @@ def handle_timeout(state, is_mine, syncing_user_ids, now):
|
||||
)
|
||||
changed = True
|
||||
else:
|
||||
# We expect to be poked occaisonally by the other side.
|
||||
# We expect to be poked occasionally by the other side.
|
||||
# This is to protect against forgetful/buggy servers, so that
|
||||
# no one gets stuck online forever.
|
||||
if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
|
||||
|
||||
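The presence hunk moves EDU registration from the replication layer onto a dedicated federation registry. A minimal sketch of the registry pattern being adopted (not synapse's actual class): handlers claim an EDU type once, and incoming EDUs are dispatched by type.

class FederationRegistry(object):
    def __init__(self):
        self._edu_handlers = {}

    def register_edu_handler(self, edu_type, handler):
        # Refuse double registration so two subsystems cannot both
        # claim the same EDU type.
        if edu_type in self._edu_handlers:
            raise KeyError("Already registered handler for %s" % (edu_type,))
        self._edu_handlers[edu_type] = handler

    def on_edu(self, edu_type, origin, content):
        handler = self._edu_handlers.get(edu_type)
        if handler is None:
            return None
        return handler(origin, content)


registry = FederationRegistry()
registry.register_edu_handler(
    "m.presence", lambda origin, content: ("presence from", origin),
)
print(registry.on_edu("m.presence", "example.com", {}))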
@@ -17,25 +17,87 @@ import logging

from twisted.internet import defer

import synapse.types
from synapse.api.errors import SynapseError, AuthError, CodeMessageException
from synapse.types import UserID
from synapse.types import UserID, get_domain_from_id
from ._base import BaseHandler


logger = logging.getLogger(__name__)


class ProfileHandler(BaseHandler):
    PROFILE_UPDATE_MS = 60 * 1000
    PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000

    def __init__(self, hs):
        super(ProfileHandler, self).__init__(hs)

        self.federation = hs.get_replication_layer()
        self.federation.register_query_handler(
        self.federation = hs.get_federation_client()
        hs.get_federation_registry().register_query_handler(
            "profile", self.on_profile_query
        )

        self.user_directory_handler = hs.get_user_directory_handler()

        if hs.config.worker_app is None:
            self.clock.looping_call(
                self._update_remote_profile_cache, self.PROFILE_UPDATE_MS,
            )

    @defer.inlineCallbacks
    def get_profile(self, user_id):
        target_user = UserID.from_string(user_id)
        if self.hs.is_mine(target_user):
            displayname = yield self.store.get_profile_displayname(
                target_user.localpart
            )
            avatar_url = yield self.store.get_profile_avatar_url(
                target_user.localpart
            )

            defer.returnValue({
                "displayname": displayname,
                "avatar_url": avatar_url,
            })
        else:
            try:
                result = yield self.federation.make_query(
                    destination=target_user.domain,
                    query_type="profile",
                    args={
                        "user_id": user_id,
                    },
                    ignore_backoff=True,
                )
                defer.returnValue(result)
            except CodeMessageException as e:
                if e.code != 404:
                    logger.exception("Failed to get displayname")

                raise

    @defer.inlineCallbacks
    def get_profile_from_cache(self, user_id):
        """Get the profile information from our local cache. If the user is
        ours then the profile information will always be correct. Otherwise,
        it may be out of date/missing.
        """
        target_user = UserID.from_string(user_id)
        if self.hs.is_mine(target_user):
            displayname = yield self.store.get_profile_displayname(
                target_user.localpart
            )
            avatar_url = yield self.store.get_profile_avatar_url(
                target_user.localpart
            )

            defer.returnValue({
                "displayname": displayname,
                "avatar_url": avatar_url,
            })
        else:
            profile = yield self.store.get_from_remote_profile_cache(user_id)
            defer.returnValue(profile or {})

    @defer.inlineCallbacks
    def get_displayname(self, target_user):
        if self.hs.is_mine(target_user):
@@ -60,7 +122,7 @@ class ProfileHandler(BaseHandler):
                logger.exception("Failed to get displayname")

                raise
            except:
            except Exception:
                logger.exception("Failed to get displayname")
            else:
                defer.returnValue(result["displayname"])
@@ -82,7 +144,13 @@ class ProfileHandler(BaseHandler):
            target_user.localpart, new_displayname
        )

        yield self._update_join_states(requester)
        if self.hs.config.user_directory_search_all_users:
            profile = yield self.store.get_profileinfo(target_user.localpart)
            yield self.user_directory_handler.handle_local_profile_change(
                target_user.to_string(), profile
            )

        yield self._update_join_states(requester, target_user)

    @defer.inlineCallbacks
    def get_avatar_url(self, target_user):
@@ -107,7 +175,7 @@ class ProfileHandler(BaseHandler):
                if e.code != 404:
                    logger.exception("Failed to get avatar_url")
                raise
            except:
            except Exception:
                logger.exception("Failed to get avatar_url")

            defer.returnValue(result["avatar_url"])
@@ -126,7 +194,13 @@ class ProfileHandler(BaseHandler):
            target_user.localpart, new_avatar_url
        )

        yield self._update_join_states(requester)
        if self.hs.config.user_directory_search_all_users:
            profile = yield self.store.get_profileinfo(target_user.localpart)
            yield self.user_directory_handler.handle_local_profile_change(
                target_user.to_string(), profile
            )

        yield self._update_join_states(requester, target_user)

    @defer.inlineCallbacks
    def on_profile_query(self, args):
@@ -151,28 +225,24 @@ class ProfileHandler(BaseHandler):
        defer.returnValue(response)

    @defer.inlineCallbacks
    def _update_join_states(self, requester):
        user = requester.user
        if not self.hs.is_mine(user):
    def _update_join_states(self, requester, target_user):
        if not self.hs.is_mine(target_user):
            return

        yield self.ratelimit(requester)

        room_ids = yield self.store.get_rooms_for_user(
            user.to_string(),
            target_user.to_string(),
        )

        for room_id in room_ids:
            handler = self.hs.get_handlers().room_member_handler
            handler = self.hs.get_room_member_handler()
            try:
                # Assume the user isn't a guest because we don't let guests set
                # profile or avatar data.
                # XXX why are we recreating `requester` here for each room?
                # what was wrong with the `requester` we were passed?
                requester = synapse.types.create_requester(user)
                # Assume the target_user isn't a guest,
                # because we don't let guests set profile or avatar data.
                yield handler.update_membership(
                    requester,
                    user,
                    target_user,
                    room_id,
                    "join",  # We treat a profile update like a join.
                    ratelimit=False,  # Try to hide that these events aren't atomic.
@@ -182,3 +252,44 @@ class ProfileHandler(BaseHandler):
                    "Failed to update join event for room %s - %s",
                    room_id, str(e.message)
                )

    def _update_remote_profile_cache(self):
        """Called periodically to check profiles of remote users we haven't
        checked in a while.
        """
        entries = yield self.store.get_remote_profile_cache_entries_that_expire(
            last_checked=self.clock.time_msec() - self.PROFILE_UPDATE_EVERY_MS
        )

        for user_id, displayname, avatar_url in entries:
            is_subscribed = yield self.store.is_subscribed_remote_profile_for_user(
                user_id,
            )
            if not is_subscribed:
                yield self.store.maybe_delete_remote_profile_cache(user_id)
                continue

            try:
                profile = yield self.federation.make_query(
                    destination=get_domain_from_id(user_id),
                    query_type="profile",
                    args={
                        "user_id": user_id,
                    },
                    ignore_backoff=True,
                )
            except Exception:
                logger.exception("Failed to get avatar_url")

                yield self.store.update_remote_profile_cache(
                    user_id, displayname, avatar_url
                )
                continue

            new_name = profile.get("displayname")
            new_avatar = profile.get("avatar_url")

            # We always hit update to update the last_check timestamp
            yield self.store.update_remote_profile_cache(
                user_id, new_name, new_avatar
            )
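Note the wiring above: `_update_remote_profile_cache` only runs on the master (`worker_app is None`) via a looping call. A sketch of the same periodic-refresh wiring using twisted's LoopingCall directly (synapse's `clock.looping_call` is a thin wrapper; the constant below mirrors the handler's):

from twisted.internet.task import LoopingCall

PROFILE_UPDATE_MS = 60 * 1000


def refresh_stale_profiles():
    # In the real handler this re-queries each expired remote profile and
    # writes the result (or the old values on failure) back to the cache,
    # bumping last_checked either way so a broken remote host is retried
    # on the next cycle rather than hammered.
    pass


loop = LoopingCall(refresh_stale_profiles)
# LoopingCall takes seconds; the handler's constant is in milliseconds.
loop.start(PROFILE_UPDATE_MS / 1000.0, now=False)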
@@ -41,9 +41,9 @@ class ReadMarkerHandler(BaseHandler):
        """

        with (yield self.read_marker_linearizer.queue((room_id, user_id))):
            account_data = yield self.store.get_account_data_for_room(user_id, room_id)

            existing_read_marker = account_data.get("m.fully_read", None)
            existing_read_marker = yield self.store.get_account_data_for_room_and_type(
                user_id, room_id, "m.fully_read",
            )

            should_update = True
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util import logcontext

from ._base import BaseHandler

@@ -34,7 +35,7 @@ class ReceiptsHandler(BaseHandler):
        self.store = hs.get_datastore()
        self.hs = hs
        self.federation = hs.get_federation_sender()
        hs.get_replication_layer().register_edu_handler(
        hs.get_federation_registry().register_edu_handler(
            "m.receipt", self._received_remote_receipt
        )
        self.clock = self.hs.get_clock()
@@ -59,6 +60,8 @@ class ReceiptsHandler(BaseHandler):
        is_new = yield self._handle_new_receipts([receipt])

        if is_new:
            # fire off a process in the background to send the receipt to
            # remote servers
            self._push_remotes([receipt])

    @defer.inlineCallbacks
@@ -126,6 +129,7 @@ class ReceiptsHandler(BaseHandler):

        defer.returnValue(True)

    @logcontext.preserve_fn  # caller should not yield on this
    @defer.inlineCallbacks
    def _push_remotes(self, receipts):
        """Given a list of receipts, works out which remote servers should be
@@ -15,7 +15,6 @@

"""Contains functions for registering clients."""
import logging
import urllib

from twisted.internet import defer

@@ -23,8 +22,10 @@ from synapse.api.errors import (
    AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
)
from synapse.http.client import CaptchaServerHttpClient
from synapse import types
from synapse.types import UserID
from synapse.util.async import run_on_reactor
from synapse.util.async import run_on_reactor, Linearizer
from synapse.util.threepids import check_3pid_allowed
from ._base import BaseHandler

logger = logging.getLogger(__name__)
@@ -36,21 +37,26 @@ class RegistrationHandler(BaseHandler):
        super(RegistrationHandler, self).__init__(hs)

        self.auth = hs.get_auth()
        self._auth_handler = hs.get_auth_handler()
        self.profile_handler = hs.get_profile_handler()
        self.user_directory_handler = hs.get_user_directory_handler()
        self.captcha_client = CaptchaServerHttpClient(hs)

        self._next_generated_user_id = None

        self.macaroon_gen = hs.get_macaroon_generator()

        self._generate_user_id_linearizer = Linearizer(
            name="_generate_user_id_linearizer",
        )

    @defer.inlineCallbacks
    def check_username(self, localpart, guest_access_token=None,
                       assigned_user_id=None):
        yield run_on_reactor()

        if urllib.quote(localpart.encode('utf-8')) != localpart:
        if types.contains_invalid_mxid_characters(localpart):
            raise SynapseError(
                400,
                "User ID can only contain characters a-z, 0-9, or '_-./'",
                "User ID can only contain characters a-z, 0-9, or '=_-./'",
                Codes.INVALID_USERNAME
            )

@@ -80,7 +86,7 @@ class RegistrationHandler(BaseHandler):
                "A different user ID has already been registered for this session",
            )

        yield self.check_user_id_not_appservice_exclusive(user_id)
        self.check_user_id_not_appservice_exclusive(user_id)

        users = yield self.store.get_users_by_id_case_insensitive(user_id)
        if users:
@@ -130,7 +136,7 @@ class RegistrationHandler(BaseHandler):
        yield run_on_reactor()
        password_hash = None
        if password:
            password_hash = self.auth_handler().hash(password)
            password_hash = yield self.auth_handler().hash(password)

        if localpart:
            yield self.check_username(localpart, guest_access_token=guest_access_token)
@@ -165,6 +171,13 @@ class RegistrationHandler(BaseHandler):
                ),
                admin=admin,
            )

            if self.hs.config.user_directory_search_all_users:
                profile = yield self.store.get_profileinfo(localpart)
                yield self.user_directory_handler.handle_local_profile_change(
                    user_id, profile
                )

        else:
            # autogen a sequential user ID
            attempts = 0
@@ -253,11 +266,10 @@ class RegistrationHandler(BaseHandler):
        """
        Registers email_id as SAML2 Based Auth.
        """
        if urllib.quote(localpart) != localpart:
        if types.contains_invalid_mxid_characters(localpart):
            raise SynapseError(
                400,
                "User ID must only contain characters which do not"
                " require URL encoding."
                "User ID can only contain characters a-z, 0-9, or '=_-./'",
            )
        user = UserID(localpart, self.hs.hostname)
        user_id = user.to_string()
@@ -286,12 +298,12 @@ class RegistrationHandler(BaseHandler):
        """

        for c in threepidCreds:
            logger.info("validating theeepidcred sid %s on id server %s",
            logger.info("validating threepidcred sid %s on id server %s",
                        c['sid'], c['idServer'])
            try:
                identity_handler = self.hs.get_handlers().identity_handler
                threepid = yield identity_handler.threepid_from_creds(c)
            except:
            except Exception:
                logger.exception("Couldn't validate 3pid")
                raise RegistrationError(400, "Couldn't validate 3pid")

@@ -300,6 +312,11 @@ class RegistrationHandler(BaseHandler):
            logger.info("got threepid with medium '%s' and address '%s'",
                        threepid['medium'], threepid['address'])

            if not check_3pid_allowed(self.hs, threepid['medium'], threepid['address']):
                raise RegistrationError(
                    403, "Third party identifier is not allowed"
                )

    @defer.inlineCallbacks
    def bind_emails(self, user_id, threepidCreds):
        """Links emails with a user ID and informs an identity server.
@@ -331,6 +348,8 @@ class RegistrationHandler(BaseHandler):

    @defer.inlineCallbacks
    def _generate_user_id(self, reseed=False):
        if reseed or self._next_generated_user_id is None:
            with (yield self._generate_user_id_linearizer.queue(())):
                if reseed or self._next_generated_user_id is None:
                    self._next_generated_user_id = (
                        yield self.store.find_next_generated_user_id_localpart()
@@ -418,13 +437,12 @@ class RegistrationHandler(BaseHandler):
                create_profile_with_localpart=user.localpart,
            )
        else:
            yield self.store.user_delete_access_tokens(user_id=user_id)
            yield self._auth_handler.delete_access_tokens_for_user(user_id)
            yield self.store.add_access_token_to_user(user_id=user_id, token=token)

        if displayname is not None:
            logger.info("setting user display name: %s -> %s", user_id, displayname)
            profile_handler = self.hs.get_handlers().profile_handler
            yield profile_handler.set_displayname(
            yield self.profile_handler.set_displayname(
                user, requester, displayname, by_admin=True,
            )

@@ -434,16 +452,34 @@ class RegistrationHandler(BaseHandler):
        return self.hs.get_auth_handler()

    @defer.inlineCallbacks
    def guest_access_token_for(self, medium, address, inviter_user_id):
    def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
        """Get a guest access token for a 3PID, creating a guest account if
        one doesn't already exist.

        Args:
            medium (str)
            address (str)
            inviter_user_id (str): The user ID who is trying to invite the
                3PID

        Returns:
            Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
                3PID guest account.
        """
        access_token = yield self.store.get_3pid_guest_access_token(medium, address)
        if access_token:
            defer.returnValue(access_token)
            user_info = yield self.auth.get_user_by_access_token(
                access_token
            )

        _, access_token = yield self.register(
            defer.returnValue((user_info["user"].to_string(), access_token))

        user_id, access_token = yield self.register(
            generate_token=True,
            make_guest=True
        )
        access_token = yield self.store.save_or_get_3pid_guest_access_token(
            medium, address, access_token, inviter_user_id
        )
        defer.returnValue(access_token)

        defer.returnValue((user_id, access_token))
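The `_generate_user_id` hunk above introduces the check/lock/re-check idiom: the condition is tested again inside the linearizer so that concurrent callers queued behind the first one don't all re-run the database lookup. A sketch of the same idiom using twisted's DeferredLock in place of synapse's Linearizer (the stand-in names here are this sketch's own):

from twisted.internet import defer

_lock = defer.DeferredLock()
_next_user_id = [None]  # boxed so the coroutine can assign to it


def find_next_user_id_from_db():
    return defer.succeed(1000)  # stand-in for the database query


@defer.inlineCallbacks
def generate_user_id(reseed=False):
    if reseed or _next_user_id[0] is None:
        yield _lock.acquire()
        try:
            # Re-check under the lock: another caller may have just
            # done the expensive lookup while we were queued.
            if reseed or _next_user_id[0] is None:
                _next_user_id[0] = yield find_next_user_id_from_db()
        finally:
            _lock.release()
    _next_user_id[0] += 1
    defer.returnValue(_next_user_id[0])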
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -60,6 +61,12 @@ class RoomCreationHandler(BaseHandler):
        },
    }

    def __init__(self, hs):
        super(RoomCreationHandler, self).__init__(hs)

        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()

    @defer.inlineCallbacks
    def create_room(self, requester, config, ratelimit=True):
        """ Creates a new room.
@@ -75,6 +82,9 @@ class RoomCreationHandler(BaseHandler):
        """
        user_id = requester.user.to_string()

        if not self.spam_checker.user_may_create_room(user_id):
            raise SynapseError(403, "You are not permitted to create rooms")

        if ratelimit:
            yield self.ratelimit(requester)

@@ -83,7 +93,7 @@ class RoomCreationHandler(BaseHandler):
                if wchar in config["room_alias_name"]:
                    raise SynapseError(400, "Invalid characters in room alias")

            room_alias = RoomAlias.create(
            room_alias = RoomAlias(
                config["room_alias_name"],
                self.hs.hostname,
            )
@@ -100,7 +110,7 @@ class RoomCreationHandler(BaseHandler):
        for i in invite_list:
            try:
                UserID.from_string(i)
            except:
            except Exception:
                raise SynapseError(400, "Invalid user_id: %s" % (i,))

        invite_3pid_list = config.get("invite_3pid", [])
@@ -115,7 +125,7 @@ class RoomCreationHandler(BaseHandler):
        while attempts < 5:
            try:
                random_string = stringutils.random_string(18)
                gen_room_id = RoomID.create(
                gen_room_id = RoomID(
                    random_string,
                    self.hs.hostname,
                )
@@ -155,13 +165,11 @@ class RoomCreationHandler(BaseHandler):

        creation_content = config.get("creation_content", {})

        msg_handler = self.hs.get_handlers().message_handler
        room_member_handler = self.hs.get_handlers().room_member_handler
        room_member_handler = self.hs.get_room_member_handler()

        yield self._send_events_for_new_room(
            requester,
            room_id,
            msg_handler,
            room_member_handler,
            preset_config=preset_config,
            invite_list=invite_list,
@@ -173,7 +181,7 @@ class RoomCreationHandler(BaseHandler):

        if "name" in config:
            name = config["name"]
            yield msg_handler.create_and_send_nonmember_event(
            yield self.event_creation_handler.create_and_send_nonmember_event(
                requester,
                {
                    "type": EventTypes.Name,
@@ -186,7 +194,7 @@ class RoomCreationHandler(BaseHandler):

        if "topic" in config:
            topic = config["topic"]
            yield msg_handler.create_and_send_nonmember_event(
            yield self.event_creation_handler.create_and_send_nonmember_event(
                requester,
                {
                    "type": EventTypes.Topic,
@@ -197,12 +205,12 @@ class RoomCreationHandler(BaseHandler):
                },
                ratelimit=False)

        for invitee in invite_list:
            content = {}
            is_direct = config.get("is_direct", None)
            if is_direct:
                content["is_direct"] = is_direct

        for invitee in invite_list:
            yield room_member_handler.update_membership(
                requester,
                UserID.from_string(invitee),
@@ -216,7 +224,7 @@ class RoomCreationHandler(BaseHandler):
            id_server = invite_3pid["id_server"]
            address = invite_3pid["address"]
            medium = invite_3pid["medium"]
            yield self.hs.get_handlers().room_member_handler.do_3pid_invite(
            yield self.hs.get_room_member_handler().do_3pid_invite(
                room_id,
                requester.user,
                medium,
@@ -241,7 +249,6 @@ class RoomCreationHandler(BaseHandler):
            self,
            creator,  # A Requester object.
            room_id,
            msg_handler,
            room_member_handler,
            preset_config,
            invite_list,
@@ -264,7 +271,7 @@ class RoomCreationHandler(BaseHandler):
        @defer.inlineCallbacks
        def send(etype, content, **kwargs):
            event = create(etype, content, **kwargs)
            yield msg_handler.create_and_send_nonmember_event(
            yield self.event_creation_handler.create_and_send_nonmember_event(
                creator,
                event,
                ratelimit=False
@@ -468,12 +475,9 @@ class RoomEventSource(object):
            user.to_string()
        )
        if app_service:
            events, end_key = yield self.store.get_appservice_room_stream(
                service=app_service,
                from_key=from_key,
                to_key=to_key,
                limit=limit,
            )
            # We no longer support AS users using /sync directly.
            # See https://github.com/matrix-org/matrix-doc/issues/1144
            raise NotImplementedError()
        else:
            room_events = yield self.store.get_membership_changes_for_user(
                user.to_string(), from_key, to_key
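`create_room` now consults a pluggable spam checker before doing anything else. The real checker is loaded from homeserver config; this stub is only a guess at the shape of the one hook the diff exercises (`user_may_create_room`) and how the handler reacts to it:

class AllowListSpamChecker(object):
    """Example policy: only explicitly allowed user IDs may create
    rooms. Purely illustrative, not synapse's actual loader."""

    def __init__(self, allowed):
        self._allowed = set(allowed)

    def user_may_create_room(self, user_id):
        return user_id in self._allowed


checker = AllowListSpamChecker(allowed={"@admin:example.com"})
for user_id in ("@admin:example.com", "@mallory:example.com"):
    if not checker.user_may_create_room(user_id):
        # Mirrors the 403 the handler raises above.
        print("403 You are not permitted to create rooms (%s)" % (user_id,))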
@@ -20,6 +20,7 @@ from ._base import BaseHandler
from synapse.api.constants import (
    EventTypes, JoinRules,
)
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.async import concurrently_execute
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.util.caches.response_cache import ResponseCache
@@ -70,6 +71,7 @@ class RoomListHandler(BaseHandler):
        if search_filter:
            # We explicitly don't bother caching searches or requests for
            # appservice specific lists.
            logger.info("Bypassing cache as search request.")
            return self._get_public_room_list(
                limit, since_token, search_filter, network_tuple=network_tuple,
            )
@@ -77,13 +79,16 @@ class RoomListHandler(BaseHandler):
        key = (limit, since_token, network_tuple)
        result = self.response_cache.get(key)
        if not result:
            logger.info("No cached result, calculating one.")
            result = self.response_cache.set(
                key,
                self._get_public_room_list(
                preserve_fn(self._get_public_room_list)(
                    limit, since_token, network_tuple=network_tuple
                )
            )
        return result
        else:
            logger.info("Using cached deferred result.")
        return make_deferred_yieldable(result)

    @defer.inlineCallbacks
    def _get_public_room_list(self, limit=None, since_token=None,
@@ -149,6 +154,8 @@ class RoomListHandler(BaseHandler):
            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        logger.info("Getting ordering for %i rooms since %s",
                    len(room_ids), stream_token)
        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
@@ -176,34 +183,43 @@ class RoomListHandler(BaseHandler):
            rooms_to_scan = rooms_to_scan[:since_token.current_limit]
            rooms_to_scan.reverse()

        # Actually generate the entries. _append_room_entry_to_chunk will append to
        # chunk but will stop if len(chunk) > limit
        chunk = []
        if limit and not search_filter:
            step = limit + 1
            for i in xrange(0, len(rooms_to_scan), step):
                # We iterate here because the vast majority of cases we'll stop
                # at first iteration, but occaisonally _append_room_entry_to_chunk
                # won't append to the chunk and so we need to loop again.
        logger.info("After sorting and filtering, %i rooms remain",
                    len(rooms_to_scan))

        # _append_room_entry_to_chunk will append to chunk but will stop if
        # len(chunk) > limit
        #
        # Normally we will generate enough results on the first iteration here,
        # but if there is a search filter, _append_room_entry_to_chunk may
        # filter some results out, in which case we loop again.
        #
        # We don't want to scan over the entire range either as that
        # would potentially waste a lot of work.
        #
        # XXX if there is no limit, we may end up DoSing the server with
        # calls to get_current_state_ids for every single room on the
        # server. Surely we should cap this somehow?
        #
        if limit:
            step = limit + 1
        else:
            # step cannot be zero
            step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1

        chunk = []
        for i in xrange(0, len(rooms_to_scan), step):
            batch = rooms_to_scan[i:i + step]
            logger.info("Processing %i rooms for result", len(batch))
            yield concurrently_execute(
                lambda r: self._append_room_entry_to_chunk(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter
                ),
                rooms_to_scan[i:i + step], 10
                batch, 5,
            )
            logger.info("Now %i rooms in result", len(chunk))
            if len(chunk) >= limit + 1:
                break
        else:
            yield concurrently_execute(
                lambda r: self._append_room_entry_to_chunk(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter
                ),
                rooms_to_scan, 5
            )

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

@@ -276,13 +292,14 @@ class RoomListHandler(BaseHandler):
            # We've already got enough, so lets just drop it.
            return

        result = yield self._generate_room_entry(room_id, num_joined_users)
        result = yield self.generate_room_entry(room_id, num_joined_users)

        if result and _matches_room_entry(result, search_filter):
            chunk.append(result)

    @cachedInlineCallbacks(num_args=1, cache_context=True)
    def _generate_room_entry(self, room_id, num_joined_users, cache_context):
    def generate_room_entry(self, room_id, num_joined_users, cache_context,
                            with_alias=True, allow_private=False):
        """Returns the entry for a room
        """
        result = {
@@ -316,9 +333,10 @@ class RoomListHandler(BaseHandler):
        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            join_rule = join_rules_event.content.get("join_rule", None)
            if join_rule and join_rule != JoinRules.PUBLIC:
            if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
                defer.returnValue(None)

        if with_alias:
            aliases = yield self.store.get_aliases_for_room(
                room_id, on_invalidate=cache_context.invalidate
            )
@@ -391,7 +409,7 @@ class RoomListHandler(BaseHandler):
    def _get_remote_list_cached(self, server_name, limit=None, since_token=None,
                                search_filter=None, include_all_networks=False,
                                third_party_instance_id=None,):
        repl_layer = self.hs.get_replication_layer()
        repl_layer = self.hs.get_federation_client()
        if search_filter:
            # We can't cache when asking for search
            return repl_layer.get_public_rooms(
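The response-cache hunk above stores the in-flight Deferred itself, so concurrent identical requests share one computation instead of each recomputing the room list. A simplified sketch of that idea (the log-context juggling done by `preserve_fn`/`make_deferred_yieldable`, and the observable-deferred machinery synapse uses so each caller gets its own copy of the result, are omitted here):

from twisted.internet import defer


class InFlightCache(object):
    def __init__(self):
        self._pending = {}

    def wrap(self, key, compute):
        # If a computation for this key is already running, piggyback on it.
        if key in self._pending:
            return self._pending[key]
        d = defer.maybeDeferred(compute)

        def _done(result):
            # Drop the entry once the computation finishes, pass the
            # result (or failure) through to the caller.
            self._pending.pop(key, None)
            return result

        d.addBoth(_done)
        self._pending[key] = d
        return d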
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.


import abc
import logging

from signedjson.key import decode_verify_key_bytes
@@ -29,29 +30,121 @@ from synapse.api.errors import AuthError, SynapseError, Codes
from synapse.types import UserID, RoomID
from synapse.util.async import Linearizer
from synapse.util.distributor import user_left_room, user_joined_room
from ._base import BaseHandler


logger = logging.getLogger(__name__)

id_server_scheme = "https://"


class RoomMemberHandler(BaseHandler):
class RoomMemberHandler(object):
    # TODO(paul): This handler currently contains a messy conflation of
    # low-level API that works on UserID objects and so on, and REST-level
    # API that takes ID strings and returns pagination chunks. These concerns
    # ought to be separated out a lot better.

    __metaclass__ = abc.ABCMeta

    def __init__(self, hs):
        super(RoomMemberHandler, self).__init__(hs)
        self.hs = hs
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()
        self.state_handler = hs.get_state_handler()
        self.config = hs.config
        self.simple_http_client = hs.get_simple_http_client()

        self.federation_handler = hs.get_handlers().federation_handler
        self.directory_handler = hs.get_handlers().directory_handler
        self.registration_handler = hs.get_handlers().registration_handler
        self.profile_handler = hs.get_profile_handler()
        self.event_creation_hander = hs.get_event_creation_handler()

        self.member_linearizer = Linearizer(name="member")

        self.clock = hs.get_clock()
        self.spam_checker = hs.get_spam_checker()

        self.distributor = hs.get_distributor()
        self.distributor.declare("user_joined_room")
        self.distributor.declare("user_left_room")
    @abc.abstractmethod
    def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
        """Try and join a room that this server is not in

        Args:
            requester (Requester)
            remote_room_hosts (list[str]): List of servers that can be used
                to join via.
            room_id (str): Room that we are trying to join
            user (UserID): User who is trying to join
            content (dict): A dict that should be used as the content of the
                join event.

        Returns:
            Deferred
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def _remote_reject_invite(self, remote_room_hosts, room_id, target):
        """Attempt to reject an invite for a room this server is not in. If we
        fail to do so we locally mark the invite as rejected.

        Args:
            requester (Requester)
            remote_room_hosts (list[str]): List of servers to use to try and
                reject invite
            room_id (str)
            target (UserID): The user rejecting the invite

        Returns:
            Deferred[dict]: A dictionary to be returned to the client, may
            include event_id etc, or nothing if we locally rejected
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
        """Get a guest access token for a 3PID, creating a guest account if
        one doesn't already exist.

        Args:
            requester (Requester)
            medium (str)
            address (str)
            inviter_user_id (str): The user ID who is trying to invite the
                3PID

        Returns:
            Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
                3PID guest account.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def _user_joined_room(self, target, room_id):
        """Notifies distributor on master process that the user has joined the
        room.

        Args:
            target (UserID)
            room_id (str)

        Returns:
            Deferred|None
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def _user_left_room(self, target, room_id):
        """Notifies distributor on master process that the user has left the
        room.

        Args:
            target (UserID)
            room_id (str)

        Returns:
            Deferred|None
        """
        raise NotImplementedError()

    @defer.inlineCallbacks
    def _local_membership_update(
@@ -63,13 +156,12 @@ class RoomMemberHandler(BaseHandler):
    ):
        if content is None:
            content = {}
        msg_handler = self.hs.get_handlers().message_handler

        content["membership"] = membership
        if requester.is_guest:
            content["kind"] = "guest"

        event, context = yield msg_handler.create_event(
        event, context = yield self.event_creation_hander.create_event(
            requester,
            {
                "type": EventTypes.Member,
@@ -87,12 +179,14 @@ class RoomMemberHandler(BaseHandler):
        )

        # Check if this event matches the previous membership event for the user.
        duplicate = yield msg_handler.deduplicate_state_event(event, context)
        duplicate = yield self.event_creation_hander.deduplicate_state_event(
            event, context,
        )
        if duplicate is not None:
            # Discard the new event since this membership change is a no-op.
            defer.returnValue(duplicate)

        yield msg_handler.handle_new_client_event(
        yield self.event_creation_hander.handle_new_client_event(
            requester,
            event,
            context,
@@ -114,32 +208,15 @@ class RoomMemberHandler(BaseHandler):
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                newly_joined = prev_member_event.membership != Membership.JOIN
            if newly_joined:
                yield user_joined_room(self.distributor, target, room_id)
                yield self._user_joined_room(target, room_id)
        elif event.membership == Membership.LEAVE:
            if prev_member_event_id:
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                if prev_member_event.membership == Membership.JOIN:
                    user_left_room(self.distributor, target, room_id)
                    yield self._user_left_room(target, room_id)

        defer.returnValue(event)

    @defer.inlineCallbacks
    def remote_join(self, remote_room_hosts, room_id, user, content):
        if len(remote_room_hosts) == 0:
            raise SynapseError(404, "No known servers")

        # We don't do an auth check if we are doing an invite
        # join dance for now, since we're kinda implicitly checking
        # that we are allowed to join when we decide whether or not we
        # need to do the invite/join dance.
        yield self.hs.get_handlers().federation_handler.do_invite_join(
            remote_room_hosts,
            room_id,
            user.to_string(),
            content,
        )
        yield user_joined_room(self.distributor, user, room_id)

    @defer.inlineCallbacks
    def update_membership(
        self,
@@ -186,6 +263,10 @@ class RoomMemberHandler(BaseHandler):
        content_specified = bool(content)
        if content is None:
            content = {}
        else:
            # We do a copy here as we potentially change some keys
            # later on.
            content = dict(content)

        effective_membership_state = action
        if action in ["kick", "unban"]:
@@ -194,8 +275,7 @@ class RoomMemberHandler(BaseHandler):
        # if this is a join with a 3pid signature, we may need to turn a 3pid
        # invite into a normal invite before we can handle the join.
        if third_party_signed is not None:
            replication = self.hs.get_replication_layer()
            yield replication.exchange_third_party_invite(
            yield self.federation_handler.exchange_third_party_invite(
                third_party_signed["sender"],
                target.to_string(),
                room_id,
@@ -210,12 +290,26 @@ class RoomMemberHandler(BaseHandler):
        if is_blocked:
            raise SynapseError(403, "This room has been blocked on this server")

        if (effective_membership_state == "invite" and
                self.hs.config.block_non_admin_invites):
        if effective_membership_state == "invite":
            block_invite = False
            is_requester_admin = yield self.auth.is_server_admin(
                requester.user,
            )
            if not is_requester_admin:
                if self.config.block_non_admin_invites:
                    logger.info(
                        "Blocking invite: user is not admin and non-admin "
                        "invites disabled"
                    )
                    block_invite = True

                if not self.spam_checker.user_may_invite(
                    requester.user.to_string(), target.to_string(), room_id,
                ):
                    logger.info("Blocking invite due to spam checker")
                    block_invite = True

            if block_invite:
                raise SynapseError(
                    403, "Invites have been disabled on this server",
                )
@@ -261,13 +355,13 @@ class RoomMemberHandler(BaseHandler):
                    raise AuthError(403, "Guest access not allowed")

            if not is_host_in_room:
                inviter = yield self.get_inviter(target.to_string(), room_id)
                inviter = yield self._get_inviter(target.to_string(), room_id)
                if inviter and not self.hs.is_mine(inviter):
                    remote_room_hosts.append(inviter.domain)

                content["membership"] = Membership.JOIN

                profile = self.hs.get_handlers().profile_handler
                profile = self.profile_handler
                if not content_specified:
                    content["displayname"] = yield profile.get_displayname(target)
                    content["avatar_url"] = yield profile.get_avatar_url(target)
@@ -275,15 +369,15 @@ class RoomMemberHandler(BaseHandler):
                if requester.is_guest:
                    content["kind"] = "guest"

                ret = yield self.remote_join(
                    remote_room_hosts, room_id, target, content
                ret = yield self._remote_join(
                    requester, remote_room_hosts, room_id, target, content
                )
                defer.returnValue(ret)

        elif effective_membership_state == Membership.LEAVE:
            if not is_host_in_room:
                # perhaps we've been invited
                inviter = yield self.get_inviter(target.to_string(), room_id)
                inviter = yield self._get_inviter(target.to_string(), room_id)
                if not inviter:
                    raise SynapseError(404, "Not a known room")

@@ -297,28 +391,10 @@ class RoomMemberHandler(BaseHandler):
                else:
                    # send the rejection to the inviter's HS.
                    remote_room_hosts = remote_room_hosts + [inviter.domain]
                    fed_handler = self.hs.get_handlers().federation_handler
                    try:
                        ret = yield fed_handler.do_remotely_reject_invite(
                            remote_room_hosts,
                            room_id,
                            target.to_string(),
                    res = yield self._remote_reject_invite(
                        requester, remote_room_hosts, room_id, target,
                    )
                        defer.returnValue(ret)
                    except Exception as e:
                        # if we were unable to reject the exception, just mark
                        # it as rejected on our end and plough ahead.
                        #
                        # The 'except' clause is very broad, but we need to
                        # capture everything from DNS failures upwards
                        #
                        logger.warn("Failed to reject invite: %s", e)

                        yield self.store.locally_reject_invite(
                            target.to_string(), room_id
                        )

                        defer.returnValue({})
                    defer.returnValue(res)

        res = yield self._local_membership_update(
            requester=requester,
@@ -373,8 +449,9 @@ class RoomMemberHandler(BaseHandler):
        else:
            requester = synapse.types.create_requester(target_user)

        message_handler = self.hs.get_handlers().message_handler
        prev_event = yield message_handler.deduplicate_state_event(event, context)
        prev_event = yield self.event_creation_hander.deduplicate_state_event(
            event, context,
        )
        if prev_event is not None:
            return

@@ -391,7 +468,7 @@ class RoomMemberHandler(BaseHandler):
        if is_blocked:
            raise SynapseError(403, "This room has been blocked on this server")

        yield message_handler.handle_new_client_event(
        yield self.event_creation_hander.handle_new_client_event(
            requester,
            event,
            context,
@@ -413,12 +490,12 @@ class RoomMemberHandler(BaseHandler):
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                newly_joined = prev_member_event.membership != Membership.JOIN
            if newly_joined:
                yield user_joined_room(self.distributor, target_user, room_id)
                yield self._user_joined_room(target_user, room_id)
        elif event.membership == Membership.LEAVE:
            if prev_member_event_id:
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                if prev_member_event.membership == Membership.JOIN:
                    user_left_room(self.distributor, target_user, room_id)
                    yield self._user_left_room(target_user, room_id)

    @defer.inlineCallbacks
    def _can_guest_join(self, current_state_ids):
@@ -452,7 +529,7 @@ class RoomMemberHandler(BaseHandler):
        Raises:
            SynapseError if room alias could not be found.
        """
        directory_handler = self.hs.get_handlers().directory_handler
        directory_handler = self.directory_handler
        mapping = yield directory_handler.get_association(room_alias)

        if not mapping:
@@ -464,7 +541,7 @@ class RoomMemberHandler(BaseHandler):
        defer.returnValue((RoomID.from_string(room_id), servers))

    @defer.inlineCallbacks
    def get_inviter(self, user_id, room_id):
    def _get_inviter(self, user_id, room_id):
        invite = yield self.store.get_invite_for_user_in_room(
            user_id=user_id,
            room_id=room_id,
@@ -483,7 +560,7 @@ class RoomMemberHandler(BaseHandler):
        requester,
        txn_id
    ):
        if self.hs.config.block_non_admin_invites:
        if self.config.block_non_admin_invites:
            is_requester_admin = yield self.auth.is_server_admin(
                requester.user,
            )
@@ -530,7 +607,7 @@ class RoomMemberHandler(BaseHandler):
            str: the matrix ID of the 3pid, or None if it is not recognized.
        """
        try:
            data = yield self.hs.get_simple_http_client().get_json(
            data = yield self.simple_http_client.get_json(
                "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server,),
                {
                    "medium": medium,
@@ -541,7 +618,7 @@ class RoomMemberHandler(BaseHandler):
            if "mxid" in data:
                if "signatures" not in data:
                    raise AuthError(401, "No signatures on 3pid binding")
                self.verify_any_signature(data, id_server)
                yield self._verify_any_signature(data, id_server)
                defer.returnValue(data["mxid"])

        except IOError as e:
@@ -549,11 +626,11 @@ class RoomMemberHandler(BaseHandler):
            defer.returnValue(None)

    @defer.inlineCallbacks
    def verify_any_signature(self, data, server_hostname):
    def _verify_any_signature(self, data, server_hostname):
        if server_hostname not in data["signatures"]:
            raise AuthError(401, "No signature from server %s" % (server_hostname,))
        for key_name, signature in data["signatures"][server_hostname].items():
            key_data = yield self.hs.get_simple_http_client().get_json(
            key_data = yield self.simple_http_client.get_json(
                "%s%s/_matrix/identity/api/v1/pubkey/%s" %
                (id_server_scheme, server_hostname, key_name,),
            )
@@ -578,7 +655,7 @@ class RoomMemberHandler(BaseHandler):
        user,
        txn_id
    ):
        room_state = yield self.hs.get_state_handler().get_current_state(room_id)
        room_state = yield self.state_handler.get_current_state(room_id)

        inviter_display_name = ""
        inviter_avatar_url = ""
@@ -609,6 +686,7 @@ class RoomMemberHandler(BaseHandler):

        token, public_keys, fallback_public_key, display_name = (
            yield self._ask_id_server_for_third_party_invite(
                requester=requester,
                id_server=id_server,
                medium=medium,
                address=address,
@@ -623,8 +701,7 @@ class RoomMemberHandler(BaseHandler):
            )
        )

        msg_handler = self.hs.get_handlers().message_handler
        yield msg_handler.create_and_send_nonmember_event(
        yield self.event_creation_hander.create_and_send_nonmember_event(
            requester,
            {
                "type": EventTypes.ThirdPartyInvite,
@@ -646,6 +723,7 @@ class RoomMemberHandler(BaseHandler):
    @defer.inlineCallbacks
    def _ask_id_server_for_third_party_invite(
        self,
        requester,
        id_server,
        medium,
        address,
@@ -662,6 +740,7 @@ class RoomMemberHandler(BaseHandler):
        Asks an identity server for a third party invite.

        Args:
            requester (Requester)
            id_server (str): hostname + optional port for the identity server.
            medium (str): The literal string "email".
            address (str): The third party address being invited.
@@ -703,24 +782,20 @@ class RoomMemberHandler(BaseHandler):
            "sender_avatar_url": inviter_avatar_url,
        }

        if self.hs.config.invite_3pid_guest:
            registration_handler = self.hs.get_handlers().registration_handler
            guest_access_token = yield registration_handler.guest_access_token_for(
        if self.config.invite_3pid_guest:
            guest_access_token, guest_user_id = yield self.get_or_register_3pid_guest(
                requester=requester,
                medium=medium,
                address=address,
                inviter_user_id=inviter_user_id,
            )

            guest_user_info = yield self.hs.get_auth().get_user_by_access_token(
                guest_access_token
            )

            invite_config.update({
                "guest_access_token": guest_access_token,
                "guest_user_id": guest_user_info["user"].to_string(),
                "guest_user_id": guest_user_id,
            })

        data = yield self.hs.get_simple_http_client().post_urlencoded_get_json(
        data = yield self.simple_http_client.post_urlencoded_get_json(
            is_url,
            invite_config
        )
@@ -742,27 +817,6 @@ class RoomMemberHandler(BaseHandler):
                display_name = data["display_name"]
        defer.returnValue((token, public_keys, fallback_public_key, display_name))

    @defer.inlineCallbacks
    def forget(self, user, room_id):
        user_id = user.to_string()

        member = yield self.state_handler.get_current_state(
            room_id=room_id,
            event_type=EventTypes.Member,
            state_key=user_id
        )
        membership = member.membership if member else None

        if membership is not None and membership not in [
            Membership.LEAVE, Membership.BAN
        ]:
            raise SynapseError(400, "User %s in room %s" % (
                user_id, room_id
            ))

        if membership:
            yield self.store.forget(user_id, room_id)

    @defer.inlineCallbacks
    def _is_host_in_room(self, current_state_ids):
        # Have we just created the room, and is this about to be the very
@@ -784,3 +838,94 @@ class RoomMemberHandler(BaseHandler):
            defer.returnValue(True)

        defer.returnValue(False)


class RoomMemberMasterHandler(RoomMemberHandler):
    def __init__(self, hs):
        super(RoomMemberMasterHandler, self).__init__(hs)

        self.distributor = hs.get_distributor()
        self.distributor.declare("user_joined_room")
        self.distributor.declare("user_left_room")

    @defer.inlineCallbacks
    def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
        """Implements RoomMemberHandler._remote_join
        """
        if len(remote_room_hosts) == 0:
            raise SynapseError(404, "No known servers")

        # We don't do an auth check if we are doing an invite
        # join dance for now, since we're kinda implicitly checking
        # that we are allowed to join when we decide whether or not we
        # need to do the invite/join dance.
        yield self.federation_handler.do_invite_join(
            remote_room_hosts,
            room_id,
            user.to_string(),
            content,
        )
        yield self._user_joined_room(user, room_id)

    @defer.inlineCallbacks
    def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
        """Implements RoomMemberHandler._remote_reject_invite
        """
        fed_handler = self.federation_handler
        try:
            ret = yield fed_handler.do_remotely_reject_invite(
                remote_room_hosts,
                room_id,
                target.to_string(),
            )
            defer.returnValue(ret)
        except Exception as e:
            # if we were unable to reject the exception, just mark
            # it as rejected on our end and plough ahead.
            #
            # The 'except' clause is very broad, but we need to
            # capture everything from DNS failures upwards
            #
            logger.warn("Failed to reject invite: %s", e)

            yield self.store.locally_reject_invite(
                target.to_string(), room_id
            )
            defer.returnValue({})

    def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
        """Implements RoomMemberHandler.get_or_register_3pid_guest
        """
        rg = self.registration_handler
        return rg.get_or_register_3pid_guest(medium, address, inviter_user_id)

    def _user_joined_room(self, target, room_id):
        """Implements RoomMemberHandler._user_joined_room
        """
        return user_joined_room(self.distributor, target, room_id)

    def _user_left_room(self, target, room_id):
        """Implements RoomMemberHandler._user_left_room
        """
        return user_left_room(self.distributor, target, room_id)

    @defer.inlineCallbacks
    def forget(self, user, room_id):
        user_id = user.to_string()

        member = yield self.state_handler.get_current_state(
            room_id=room_id,
            event_type=EventTypes.Member,
            state_key=user_id
        )
        membership = member.membership if member else None

        if membership is not None and membership not in [
            Membership.LEAVE, Membership.BAN
        ]:
            raise SynapseError(400, "User %s in room %s" % (
                user_id, room_id
            ))

        if membership:
            yield self.store.forget(user_id, room_id)
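The diff above turns RoomMemberHandler into an abstract base: the shared membership logic stays in the base class, while the side-effecting operations become abstract methods that the master implements directly and a worker proxies over replication HTTP. A stripped-down sketch of that split (Python 2 metaclass style, matching the codebase; the class and method names below are this sketch's own):

import abc


class MemberHandlerBase(object):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def _user_joined_room(self, target, room_id):
        """Transport-specific: notify the rest of the system of a join."""
        raise NotImplementedError()

    def join(self, target, room_id):
        # ... shared validation and event building would live here ...
        return self._user_joined_room(target, room_id)


class MasterMemberHandler(MemberHandlerBase):
    def _user_joined_room(self, target, room_id):
        # The master notifies the distributor directly; a worker subclass
        # would instead POST to the master's replication endpoint.
        return "notified distributor: %s joined %s" % (target, room_id)


print(MasterMemberHandler().join("@alice:example.com", "!room:example.com"))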
synapse/handlers/room_member_worker.py (new file, 102 lines)
@@ -0,0 +1,102 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.replication.http.membership import (
    remote_join, remote_reject_invite, get_or_register_3pid_guest,
    notify_user_membership_change,
)


logger = logging.getLogger(__name__)


class RoomMemberWorkerHandler(RoomMemberHandler):
    @defer.inlineCallbacks
    def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
        """Implements RoomMemberHandler._remote_join
        """
        if len(remote_room_hosts) == 0:
            raise SynapseError(404, "No known servers")

        ret = yield remote_join(
            self.simple_http_client,
            host=self.config.worker_replication_host,
            port=self.config.worker_replication_http_port,
            requester=requester,
            remote_room_hosts=remote_room_hosts,
            room_id=room_id,
            user_id=user.to_string(),
            content=content,
        )

        yield self._user_joined_room(user, room_id)

        defer.returnValue(ret)

    def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
        """Implements RoomMemberHandler._remote_reject_invite
        """
        return remote_reject_invite(
            self.simple_http_client,
            host=self.config.worker_replication_host,
            port=self.config.worker_replication_http_port,
            requester=requester,
            remote_room_hosts=remote_room_hosts,
            room_id=room_id,
            user_id=target.to_string(),
        )

    def _user_joined_room(self, target, room_id):
        """Implements RoomMemberHandler._user_joined_room
        """
        return notify_user_membership_change(
            self.simple_http_client,
            host=self.config.worker_replication_host,
            port=self.config.worker_replication_http_port,
            user_id=target.to_string(),
            room_id=room_id,
            change="joined",
        )

    def _user_left_room(self, target, room_id):
        """Implements RoomMemberHandler._user_left_room
        """
        return notify_user_membership_change(
            self.simple_http_client,
            host=self.config.worker_replication_host,
            port=self.config.worker_replication_http_port,
            user_id=target.to_string(),
            room_id=room_id,
            change="left",
        )

    def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
        """Implements RoomMemberHandler.get_or_register_3pid_guest
        """
        return get_or_register_3pid_guest(
            self.simple_http_client,
            host=self.config.worker_replication_host,
            port=self.config.worker_replication_http_port,
            requester=requester,
            medium=medium,
            address=address,
            inviter_user_id=inviter_user_id,
        )
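With both implementations in place, a process presumably picks one based on whether it is running as a worker. The real wiring lives in synapse's HomeServer factory methods and is not part of this diff; the selector below is only a guess at its shape, keyed off the same `worker_app` config the handlers themselves check.

def get_room_member_handler(hs):
    # Workers proxy membership side effects to the master over
    # replication HTTP; the master process acts on them directly.
    if hs.config.worker_app:
        from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
        return RoomMemberWorkerHandler(hs)
    from synapse.handlers.room_member import RoomMemberMasterHandler
    return RoomMemberMasterHandler(hs)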
@@ -61,7 +61,7 @@ class SearchHandler(BaseHandler):
                assert batch_group is not None
                assert batch_group_key is not None
                assert batch_token is not None
            except:
            except Exception:
                raise SynapseError(400, "Invalid batch")

        try:
56
synapse/handlers/set_password.py
Normal file
56
synapse/handlers/set_password.py
Normal file
@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from twisted.internet import defer

from synapse.api.errors import Codes, StoreError, SynapseError
from ._base import BaseHandler

logger = logging.getLogger(__name__)


class SetPasswordHandler(BaseHandler):
    """Handler which deals with changing user account passwords"""
    def __init__(self, hs):
        super(SetPasswordHandler, self).__init__(hs)
        self._auth_handler = hs.get_auth_handler()
        self._device_handler = hs.get_device_handler()

    @defer.inlineCallbacks
    def set_password(self, user_id, newpassword, requester=None):
        password_hash = yield self._auth_handler.hash(newpassword)

        except_device_id = requester.device_id if requester else None
        except_access_token_id = requester.access_token_id if requester else None

        try:
            yield self.store.user_set_password_hash(user_id, password_hash)
        except StoreError as e:
            if e.code == 404:
                raise SynapseError(404, "Unknown user", Codes.NOT_FOUND)
            raise e

        # we want to log out all of the user's other sessions. First delete
        # all his other devices.
        yield self._device_handler.delete_all_devices_for_user(
            user_id, except_device_id=except_device_id,
        )

        # and now delete any access tokens which weren't associated with
        # devices (or were associated with this device).
        yield self._auth_handler.delete_access_tokens_for_user(
            user_id, except_token_id=except_access_token_id,
        )
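For context, a hedged sketch of how this handler would be driven from a REST layer. The handle_password_change function below is invented for illustration, and get_set_password_handler is assumed to be the HomeServer accessor following the usual naming convention:

from twisted.internet import defer

@defer.inlineCallbacks
def handle_password_change(hs, requester, new_password):
    # Resolve the handler from the HomeServer object and change the password.
    # Passing the requester keeps that device's session alive while all of
    # the user's other devices and access tokens are logged out.
    handler = hs.get_set_password_handler()  # assumed accessor
    yield handler.set_password(
        requester.user.to_string(), new_password, requester,
    )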
@@ -15,7 +15,7 @@
 from synapse.api.constants import Membership, EventTypes
 from synapse.util.async import concurrently_execute
-from synapse.util.logcontext import LoggingContext
+from synapse.util.logcontext import LoggingContext, make_deferred_yieldable, preserve_fn
 from synapse.util.metrics import Measure, measure_func
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.push.clientformat import format_push_rules_for_user
@@ -108,6 +108,17 @@ class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [
         return True


+class GroupsSyncResult(collections.namedtuple("GroupsSyncResult", [
+    "join",
+    "invite",
+    "leave",
+])):
+    __slots__ = []
+
+    def __nonzero__(self):
+        return bool(self.join or self.invite or self.leave)
+
+
 class DeviceLists(collections.namedtuple("DeviceLists", [
     "changed",  # list of user_ids whose devices may have changed
     "left",  # list of user_ids whose devices we no longer track
@@ -129,6 +140,7 @@ class SyncResult(collections.namedtuple("SyncResult", [
     "device_lists",  # List of user_ids whose devices have changed
     "device_one_time_keys_count",  # Dict of algorithm to count for one time keys
                                    # for this device
+    "groups",
 ])):
     __slots__ = []

@@ -144,7 +156,8 @@ class SyncResult(collections.namedtuple("SyncResult", [
             self.archived or
             self.account_data or
             self.to_device or
-            self.device_lists
+            self.device_lists or
+            self.groups
         )
@@ -171,11 +184,11 @@ class SyncHandler(object):
         if not result:
             result = self.response_cache.set(
                 sync_config.request_key,
-                self._wait_for_sync_for_user(
+                preserve_fn(self._wait_for_sync_for_user)(
                     sync_config, since_token, timeout, full_state
                 )
             )
-        return result
+        return make_deferred_yieldable(result)

     @defer.inlineCallbacks
     def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
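The two helpers imported earlier implement Synapse's log-context discipline around the shared ResponseCache: preserve_fn wraps the coroutine so that kicking it off does not corrupt the caller's log context, and make_deferred_yieldable turns the raw cached Deferred back into one that is safe to yield. A condensed sketch of the same flow, with the surrounding function and cache wiring illustrative rather than Synapse's real code:

from twisted.internet import defer
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn

@defer.inlineCallbacks
def cached_call(response_cache, key, fn, *args):
    result = response_cache.get(key)
    if not result:
        # Start fn without leaking our log context into the shared Deferred.
        result = response_cache.set(key, preserve_fn(fn)(*args))
    # Restore log-context rules before handing the result back to the caller.
    rv = yield make_deferred_yieldable(result)
    defer.returnValue(rv)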
@@ -222,10 +235,10 @@ class SyncHandler(object):
         defer.returnValue(rules)

     @defer.inlineCallbacks
-    def ephemeral_by_room(self, sync_config, now_token, since_token=None):
+    def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None):
         """Get the ephemeral events for each room the user is in

         Args:
-            sync_config (SyncConfig): The flags, filters and user for the sync.
+            sync_result_builder(SyncResultBuilder)
             now_token (StreamToken): Where the server is currently up to.
             since_token (StreamToken): Where the server was when the client
                 last synced.
@@ -235,10 +248,12 @@ class SyncHandler(object):
             typing events for that room.
         """

+        sync_config = sync_result_builder.sync_config
+
         with Measure(self.clock, "ephemeral_by_room"):
             typing_key = since_token.typing_key if since_token else "0"

-            room_ids = yield self.store.get_rooms_for_user(sync_config.user.to_string())
+            room_ids = sync_result_builder.joined_room_ids

             typing_source = self.event_sources.sources["typing"]
             typing, typing_key = yield typing_source.get_new_events(
@@ -552,10 +567,22 @@ class SyncHandler(object):
         # Always use the `now_token` in `SyncResultBuilder`
         now_token = yield self.event_sources.get_current_token()

+        user_id = sync_config.user.to_string()
+        app_service = self.store.get_app_service_by_user_id(user_id)
+        if app_service:
+            # We no longer support AS users using /sync directly.
+            # See https://github.com/matrix-org/matrix-doc/issues/1144
+            raise NotImplementedError()
+        else:
+            joined_room_ids = yield self.get_rooms_for_user_at(
+                user_id, now_token.room_stream_id,
+            )
+
         sync_result_builder = SyncResultBuilder(
             sync_config, full_state,
             since_token=since_token,
             now_token=now_token,
+            joined_room_ids=joined_room_ids,
         )

         account_data_by_room = yield self._generate_sync_entry_for_account_data(
@@ -590,11 +617,12 @@ class SyncHandler(object):
         device_id = sync_config.device_id
         one_time_key_counts = {}
         if device_id:
             user_id = sync_config.user.to_string()
             one_time_key_counts = yield self.store.count_e2e_one_time_keys(
                 user_id, device_id
             )

+        yield self._generate_sync_entry_for_groups(sync_result_builder)
+
         defer.returnValue(SyncResult(
             presence=sync_result_builder.presence,
             account_data=sync_result_builder.account_data,
@@ -603,10 +631,57 @@ class SyncHandler(object):
             archived=sync_result_builder.archived,
             to_device=sync_result_builder.to_device,
             device_lists=device_lists,
+            groups=sync_result_builder.groups,
             device_one_time_keys_count=one_time_key_counts,
             next_batch=sync_result_builder.now_token,
         ))
+
+    @measure_func("_generate_sync_entry_for_groups")
+    @defer.inlineCallbacks
+    def _generate_sync_entry_for_groups(self, sync_result_builder):
+        user_id = sync_result_builder.sync_config.user.to_string()
+        since_token = sync_result_builder.since_token
+        now_token = sync_result_builder.now_token
+
+        if since_token and since_token.groups_key:
+            results = yield self.store.get_groups_changes_for_user(
+                user_id, since_token.groups_key, now_token.groups_key,
+            )
+        else:
+            results = yield self.store.get_all_groups_for_user(
+                user_id, now_token.groups_key,
+            )
+
+        invited = {}
+        joined = {}
+        left = {}
+        for result in results:
+            membership = result["membership"]
+            group_id = result["group_id"]
+            gtype = result["type"]
+            content = result["content"]
+
+            if membership == "join":
+                if gtype == "membership":
+                    # TODO: Add profile
+                    content.pop("membership", None)
+                    joined[group_id] = content["content"]
+                else:
+                    joined.setdefault(group_id, {})[gtype] = content
+            elif membership == "invite":
+                if gtype == "membership":
+                    content.pop("membership", None)
+                    invited[group_id] = content["content"]
+            else:
+                if gtype == "membership":
+                    left[group_id] = content["content"]
+
+        sync_result_builder.groups = GroupsSyncResult(
+            join=joined,
+            invite=invited,
+            leave=left,
+        )
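Illustrative only: after this method runs, the groups portion of the sync response is keyed first by membership state and then by group ID. The IDs and payloads below are invented for the example.

groups_section = {
    "join":   {"+community:example.com": {"profile": {"name": "Example"}}},
    "invite": {"+invites:example.com": {"profile": {"name": "Invites"}}},
    "leave":  {"+old:example.com": {}},
}
print(sorted(groups_section["join"]))  # ['+community:example.com']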

     @measure_func("_generate_sync_entry_for_device_list")
     @defer.inlineCallbacks
     def _generate_sync_entry_for_device_list(self, sync_result_builder,
@@ -829,7 +904,7 @@ class SyncHandler(object):
             ephemeral_by_room = {}
         else:
             now_token, ephemeral_by_room = yield self.ephemeral_by_room(
-                sync_result_builder.sync_config,
+                sync_result_builder,
                 now_token=sync_result_builder.now_token,
                 since_token=sync_result_builder.since_token,
             )
@@ -934,15 +1009,8 @@ class SyncHandler(object):
         if rooms_changed:
             defer.returnValue(True)

-        app_service = self.store.get_app_service_by_user_id(user_id)
-        if app_service:
-            rooms = yield self.store.get_app_service_rooms(app_service)
-            joined_room_ids = set(r.room_id for r in rooms)
-        else:
-            joined_room_ids = yield self.store.get_rooms_for_user(user_id)
-
         stream_id = RoomStreamToken.parse_stream_token(since_token.room_key).stream
-        for room_id in joined_room_ids:
+        for room_id in sync_result_builder.joined_room_ids:
             if self.store.has_room_changed_since(room_id, stream_id):
                 defer.returnValue(True)
         defer.returnValue(False)
@@ -966,13 +1034,6 @@ class SyncHandler(object):

         assert since_token

-        app_service = self.store.get_app_service_by_user_id(user_id)
-        if app_service:
-            rooms = yield self.store.get_app_service_rooms(app_service)
-            joined_room_ids = set(r.room_id for r in rooms)
-        else:
-            joined_room_ids = yield self.store.get_rooms_for_user(user_id)
-
         # Get a list of membership change events that have happened.
         rooms_changed = yield self.store.get_membership_changes_for_user(
             user_id, since_token.room_key, now_token.room_key
@@ -995,7 +1056,7 @@ class SyncHandler(object):
         # we do send down the room, and with full state, where necessary

         old_state_ids = None
-        if room_id in joined_room_ids and non_joins:
+        if room_id in sync_result_builder.joined_room_ids and non_joins:
             # Always include if the user (re)joined the room, especially
             # important so that device list changes are calculated correctly.
             # If there are non join member events, but we are still in the room,
@@ -1005,7 +1066,7 @@ class SyncHandler(object):
             # User is in the room so we don't need to do the invite/leave checks
             continue

-        if room_id in joined_room_ids or has_join:
+        if room_id in sync_result_builder.joined_room_ids or has_join:
             old_state_ids = yield self.get_state_at(room_id, since_token)
             old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None)
             old_mem_ev = None
@@ -1017,7 +1078,7 @@ class SyncHandler(object):
             newly_joined_rooms.append(room_id)

         # If user is in the room then we don't need to do the invite/leave checks
-        if room_id in joined_room_ids:
+        if room_id in sync_result_builder.joined_room_ids:
             continue

         if not non_joins:
@@ -1084,7 +1145,7 @@ class SyncHandler(object):

         # Get all events for rooms we're currently joined to.
         room_to_events = yield self.store.get_room_events_stream_for_rooms(
-            room_ids=joined_room_ids,
+            room_ids=sync_result_builder.joined_room_ids,
             from_key=since_token.room_key,
             to_key=now_token.room_key,
             limit=timeline_limit + 1,
@@ -1092,7 +1153,7 @@ class SyncHandler(object):

         # We loop through all room ids, even if there are no new events, in case
         # there are non room events that we need to notify about.
-        for room_id in joined_room_ids:
+        for room_id in sync_result_builder.joined_room_ids:
             room_entry = room_to_events.get(room_id, None)

             if room_entry:
@@ -1300,6 +1361,54 @@ class SyncHandler(object):
         else:
             raise Exception("Unrecognized rtype: %r", room_builder.rtype)

+    @defer.inlineCallbacks
+    def get_rooms_for_user_at(self, user_id, stream_ordering):
+        """Get set of joined rooms for a user at the given stream ordering.
+
+        The stream ordering *must* be recent, otherwise this may throw an
+        exception if older than a month. (This function is called with the
+        current token, which should be perfectly fine).
+
+        Args:
+            user_id (str)
+            stream_ordering (int)
+
+        ReturnValue:
+            Deferred[frozenset[str]]: Set of room_ids the user is in at given
+            stream_ordering.
+        """
+        joined_rooms = yield self.store.get_rooms_for_user_with_stream_ordering(
+            user_id,
+        )
+
+        joined_room_ids = set()
+
+        # We need to check that the stream ordering of the join for each room
+        # is before the stream_ordering asked for. This might not be the case
+        # if the user joins a room between us getting the current token and
+        # calling `get_rooms_for_user_with_stream_ordering`.
+        # If the membership's stream ordering is after the given stream
+        # ordering, we need to go and work out if the user was in the room
+        # before.
+        for room_id, membership_stream_ordering in joined_rooms:
+            if membership_stream_ordering <= stream_ordering:
+                joined_room_ids.add(room_id)
+                continue
+
+            logger.info("User joined room after current token: %s", room_id)
+
+            extrems = yield self.store.get_forward_extremeties_for_room(
+                room_id, stream_ordering,
+            )
+            users_in_room = yield self.state.get_current_user_in_room(
+                room_id, extrems,
+            )
+            if user_id in users_in_room:
+                joined_room_ids.add(room_id)
+
+        joined_room_ids = frozenset(joined_room_ids)
+        defer.returnValue(joined_room_ids)


 def _action_has_highlight(actions):
     for action in actions:
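A tiny worked example (invented data) of the filtering rule inside get_rooms_for_user_at above: joins at or before the requested stream ordering are kept outright, while later joins fall through to the slower forward-extremities check.

stream_ordering = 100
joined_rooms = [("!a:hs", 90), ("!b:hs", 100), ("!c:hs", 105)]

kept = set()
for room_id, join_ordering in joined_rooms:
    if join_ordering <= stream_ordering:
        kept.add(room_id)  # join already visible at this ordering

print(sorted(kept))  # ['!a:hs', '!b:hs'] -- '!c:hs' needs the history check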
@@ -1349,7 +1458,8 @@ def _calculate_state(timeline_contains, timeline_start, previous, current):

 class SyncResultBuilder(object):
     "Used to help build up a new SyncResult for a user"
-    def __init__(self, sync_config, full_state, since_token, now_token):
+    def __init__(self, sync_config, full_state, since_token, now_token,
+                 joined_room_ids):
         """
         Args:
             sync_config(SyncConfig)
@@ -1361,6 +1471,7 @@ class SyncResultBuilder(object):
         self.full_state = full_state
         self.since_token = since_token
         self.now_token = now_token
+        self.joined_room_ids = joined_room_ids

         self.presence = []
         self.account_data = []
@@ -1368,6 +1479,7 @@ class SyncResultBuilder(object):
         self.invited = []
         self.archived = []
         self.device = []
+        self.groups = None
         self.to_device = []
Some files were not shown because too many files have changed in this diff.