Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-07 01:20:16 +00:00

Compare commits: erikj/file ... v0.19.0-rc

1372 Commits
(Commit table: 1372 rows, 82b3e0851c through 3bb3f02517. Only the SHA1 column is populated in this view; the author, date, and message cells are empty.)
.gitignore (vendored, 8 changed lines)

@@ -24,10 +24,10 @@ homeserver*.yaml
 .coverage
 htmlcov
 
-demo/*.db
-demo/*.log
-demo/*.log.*
-demo/*.pid
+demo/*/*.db
+demo/*/*.log
+demo/*/*.log.*
+demo/*/*.pid
 demo/media_store.*
 demo/etc
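
The widened patterns match demo files one directory level down (e.g. ``demo/8080/homeserver.db``). Whether a given path is caught by the new rules can be checked with ``git check-ignore`` (a quick sketch; the path is a hypothetical example)::

    git check-ignore -v demo/8080/homeserver.db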

.travis.yml (new file, +17 lines)

@@ -0,0 +1,17 @@
+sudo: false
+language: python
+python: 2.7
+
+# tell travis to cache ~/.cache/pip
+cache: pip
+
+env:
+ - TOX_ENV=packaging
+ - TOX_ENV=pep8
+ - TOX_ENV=py27
+
+install:
+ - pip install tox
+
+script:
+ - tox -e $TOX_ENV
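
Each entry under ``env`` becomes one Travis build job running ``tox -e $TOX_ENV``. The same three checks can be reproduced locally in a single invocation (a sketch, assuming ``tox`` is installed and the working directory holds the repository's ``tox.ini``)::

    pip install tox
    tox -e packaging,pep8,py27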

CHANGES.rst (610 changed lines)

@@ -1,3 +1,613 @@
Changes in synapse v0.19.0-rc3 (2017-02-03)
===========================================

* Fix email push in pusher worker (PR #1875)
* Make presence.get_new_events a bit faster (PR #1876)
* Make /keys/changes a bit more performant (PR #1877)


Changes in synapse v0.19.0-rc2 (2017-02-02)
===========================================

* Include newly joined users in /keys/changes API (PR #1872)


Changes in synapse v0.19.0-rc1 (2017-02-02)
===========================================

Features:

* Add support for specifying multiple bind addresses (PR #1709, #1712, #1795,
  #1835). Thanks to @kyrias!
* Add /account/3pid/delete endpoint (PR #1714)
* Add config option to configure the Riot URL used in notification emails (PR
  #1811). Thanks to @aperezdc!
* Add username and password config options for turn server (PR #1832). Thanks
  to @xsteadfastx!
* Implement device lists updates over federation (PR #1857, #1861, #1864)
* Implement /keys/changes (PR #1869, #1872); a request sketch follows below
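
For illustration, a request to the new endpoint looks roughly like the
following (a sketch only: the server name, access token, and ``from``/``to``
sync stream tokens are placeholders)::

    curl 'https://my.domain.name/_matrix/client/r0/keys/changes?from=<prev_batch>&to=<next_batch>' \
         -H 'Authorization: Bearer <access_token>'
    # responds with the users whose device lists changed between the two
    # tokens, e.g. {"changed": ["@alice:example.com"]}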

Changes:

* Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
* Log which files we saved attachments to in the media_repository (PR #1791)
* Linearize updates to membership via PUT /state/ to better handle multiple
  joins (PR #1787)
* Limit number of entries to prefill from cache on startup (PR #1792)
* Remove full_twisted_stacktraces option (PR #1802)
* Measure size of some caches by sum of the size of cached values (PR #1815)
* Measure metrics of string_cache (PR #1821)
* Reduce logging verbosity (PR #1822, #1823, #1824)
* Don't clobber a displayname or avatar_url if provided by an m.room.member
  event (PR #1852)
* Better handle 401/404 responses for federation /send/ (PR #1866, #1871)


Fixes:

* Fix ability to change password to a non-ascii one (PR #1711)
* Fix push getting stuck due to looking at the wrong view of state (PR #1820)
* Fix email address comparison to be case insensitive (PR #1827)
* Fix occasional inconsistencies of room membership (PR #1836, #1840)


Performance:

* Don't block message sending on bumping presence (PR #1789)
* Change device_inbox stream index to include user (PR #1793)
* Optimise state resolution (PR #1818)
* Use DB cache of joined users for presence (PR #1862)
* Add an index to make membership queries faster (PR #1867)

Changes in synapse v0.18.7 (2017-01-09)
=======================================

No changes from v0.18.7-rc2


Changes in synapse v0.18.7-rc2 (2017-01-07)
===========================================

Bug fixes:

* Fix error in rc1's discarding invalid inbound traffic logic that was
  incorrectly discarding missing events


Changes in synapse v0.18.7-rc1 (2017-01-06)
===========================================

Bug fixes:

* Fix error in PR #1764 to actually fix the nightmare #1753 bug.
* Improve deadlock logging further
* Discard inbound federation traffic from invalid domains, to immunise
  against #1753


Changes in synapse v0.18.6 (2017-01-06)
=======================================

Bug fixes:

* Fix bug when checking if a guest user is allowed to join a room (PR #1772).
  Thanks to Patrik Oldsberg for diagnosing and the fix!


Changes in synapse v0.18.6-rc3 (2017-01-05)
===========================================

Bug fixes:

* Fix bug where we failed to send ban events to the banned server (PR #1758)
* Fix bug where we sent events that didn't originate on this server to
  other servers (PR #1764)
* Fix bug where processing an event from a remote server took a long time
  because we were making long HTTP requests (PR #1765, PR #1744)

Changes:

* Improve logging for debugging deadlocks (PR #1766, PR #1767)


Changes in synapse v0.18.6-rc2 (2016-12-30)
===========================================

Bug fixes:

* Fix memory leak in twisted by initialising logging correctly (PR #1731)
* Fix bug where fetching missing events took an unacceptable amount of time in
  large rooms (PR #1734)


Changes in synapse v0.18.6-rc1 (2016-12-29)
===========================================

Bug fixes:

* Make sure that outbound connections are closed (PR #1725)


Changes in synapse v0.18.5 (2016-12-16)
=======================================

Bug fixes:

* Fix federation /backfill returning events it shouldn't (PR #1700)
* Fix crash in url preview (PR #1701)


Changes in synapse v0.18.5-rc3 (2016-12-13)
===========================================

Features:

* Add support for E2E for guests (PR #1653)
* Add new API for appservice-specific public room lists (PR #1676)
* Add new room membership APIs (PR #1680)


Changes:

* Enable guest access for private rooms by default (PR #653)
* Limit the number of events that can be created on a given room concurrently
  (PR #1620)
* Log the args that we have on UI auth completion (PR #1649)
* Stop generating refresh_tokens (PR #1654)
* Stop putting a time caveat on access tokens (PR #1656)
* Remove unspecced GET endpoints for e2e keys (PR #1694)


Bug fixes:

* Fix handling of 500s and 429s over federation (PR #1650)
* Fix Content-Type header parsing (PR #1660)
* Fix error when previewing sites that include unicode, thanks to kyrias (PR
  #1664)
* Fix some cases where we drop read receipts (PR #1678)
* Fix bug where calls to ``/sync`` didn't correctly time out (PR #1683)
* Fix bug where E2E key query would fail if a single remote host failed (PR
  #1686)


Changes in synapse v0.18.5-rc2 (2016-11-24)
===========================================

Bug fixes:

* Don't send old events over federation; fixes a bug in -rc1.


Changes in synapse v0.18.5-rc1 (2016-11-24)
===========================================

Features:

* Implement "event_fields" in filters (PR #1638)

Changes:

* Use external ldap auth package (PR #1628)
* Split out federation transaction sending to a worker (PR #1635)
* Fail with a coherent error message if ``/sync?filter=`` is invalid (PR #1636)
* More efficient notif count queries (PR #1644)


Changes in synapse v0.18.4 (2016-11-22)
=======================================

Bug fixes:

* Add workaround for buggy clients that fail to register (PR #1632)


Changes in synapse v0.18.4-rc1 (2016-11-14)
===========================================

Changes:

* Various database efficiency improvements (PR #1188, #1192)
* Update default config to blacklist more internal IPs, thanks to Euan Kemp (PR
  #1198)
* Allow specifying duration in minutes in config, thanks to Daniel Dent (PR
  #1625)


Bug fixes:

* Fix media repo to set CORS headers on responses (PR #1190)
* Fix registration to not error on non-ascii passwords (PR #1191)
* Fix create event code to limit the number of prev_events (PR #1615)
* Fix bug in transaction ID deduplication (PR #1624)


Changes in synapse v0.18.3 (2016-11-08)
=======================================

SECURITY UPDATE

Explicitly require authentication when using LDAP3. This is the default on
versions of ``ldap3`` above 1.0, but some distributions will package an older
version.

If you are using LDAP3 login and have a version of ``ldap3`` older than 1.0 it
is **CRITICAL to upgrade**.


Changes in synapse v0.18.2 (2016-11-01)
=======================================

No changes since v0.18.2-rc5


Changes in synapse v0.18.2-rc5 (2016-10-28)
===========================================

Bug fixes:

* Fix prometheus process metrics in worker processes (PR #1184)


Changes in synapse v0.18.2-rc4 (2016-10-27)
===========================================

Bug fixes:

* Fix ``user_threepids`` schema delta, which in some instances prevented
  startup after upgrade (PR #1183)


Changes in synapse v0.18.2-rc3 (2016-10-27)
===========================================

Changes:

* Allow clients to supply access tokens as headers (PR #1098); see the example
  below
* Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
* Make password reset email field case insensitive (PR #1170)
* Reduce redundant database work in email pusher (PR #1174)
* Allow configurable rate limiting per AS (PR #1175)
* Check whether to ratelimit sooner to avoid work (PR #1176)
* Standardise prometheus metrics (PR #1177)
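
A sketch of the two styles side by side (the server name and token are
placeholders; ``/sync`` is just a convenient authenticated endpoint to
demonstrate with)::

    # token in the query string (the pre-existing style)
    curl 'https://my.domain.name/_matrix/client/r0/sync?access_token=<access_token>'

    # token in a header (newly accepted)
    curl 'https://my.domain.name/_matrix/client/r0/sync' \
         -H 'Authorization: Bearer <access_token>'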

Bug fixes:

* Fix incredibly slow back pagination query (PR #1178)
* Fix infinite typing bug (PR #1179)


Changes in synapse v0.18.2-rc2 (2016-10-25)
===========================================

(This release did not include the changes advertised and was identical to RC1)


Changes in synapse v0.18.2-rc1 (2016-10-17)
===========================================

Changes:

* Remove redundant event_auth index (PR #1113)
* Reduce DB hits for replication (PR #1141)
* Implement pluggable password auth (PR #1155)
* Remove rate limiting from app service senders and fix get_or_create_user
  requester, thanks to Patrik Oldsberg (PR #1157)
* window.postmessage for Interactive Auth fallback (PR #1159)
* Use sys.executable instead of hardcoded python, thanks to Pedro Larroy
  (PR #1162)
* Add config option for adding additional TLS fingerprints (PR #1167)
* User-interactive auth on delete device (PR #1168)


Bug fixes:

* Fix not being allowed to set your own state_key, thanks to Patrik Oldsberg
  (PR #1150)
* Fix interactive auth to return a 401 for an incorrect password (PR #1160,
  #1166)
* Fix email push notifs being dropped (PR #1169)


Changes in synapse v0.18.1 (2016-10-05)
=======================================

No changes since v0.18.1-rc1


Changes in synapse v0.18.1-rc1 (2016-09-30)
===========================================

Features:

* Add total_room_count_estimate to ``/publicRooms`` (PR #1133)


Changes:

* Time out typing over federation (PR #1140)
* Restructure LDAP authentication (PR #1153)


Bug fixes:

* Fix 3pid invites when server is already in the room (PR #1136)
* Fix upgrading with SQLite taking lots of CPU for a few days
  after upgrade (PR #1144)
* Fix upgrading from very old database versions (PR #1145)
* Fix port script to work with recently added tables (PR #1146)


Changes in synapse v0.18.0 (2016-09-19)
=======================================

The release includes major changes to the state storage database schemas, which
significantly reduce database size. Synapse will attempt to upgrade the current
data in the background. Servers with large SQLite databases may experience
degraded performance while this upgrade is in progress, so you may want to
consider migrating to Postgres before upgrading very large SQLite databases.
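
For the SQLite-to-Postgres migration suggested above, Synapse ships a port
script. A sketch of its invocation (it assumes an empty Postgres database has
been created and that the config file's ``database`` section points at it;
both paths are placeholders)::

    synapse_port_db --sqlite-database homeserver.db \
                    --postgres-config homeserver.yaml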

Changes:

* Make public room search case insensitive (PR #1127)


Bug fixes:

* Fix and clean up publicRooms pagination (PR #1129)


Changes in synapse v0.18.0-rc1 (2016-09-16)
===========================================

Features:

* Add ``only=highlight`` on ``/notifications`` (PR #1081)
* Add server param to /publicRooms (PR #1082)
* Allow clients to ask for the whole of a single state event (PR #1094)
* Add is_direct param to /createRoom (PR #1108)
* Add pagination support to publicRooms (PR #1121)
* Add very basic filter API to /publicRooms (PR #1126)
* Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104,
  #1111)


Changes:

* Move to storing state_groups_state as deltas, greatly reducing DB size (PR
  #1065)
* Reduce amount of state pulled out of the DB during common requests (PR #1069)
* Allow PDF to be rendered from media repo (PR #1071)
* Reindex state_groups_state after pruning (PR #1085)
* Clobber EDUs in send queue (PR #1095)
* Conform better to the CAS protocol specification (PR #1100)
* Limit how often we ask for keys from dead servers (PR #1114)


Bug fixes:

* Fix /notifications API when used with ``from`` param (PR #1080)
* Fix backfill when we cannot find an event (PR #1107)


Changes in synapse v0.17.3 (2016-09-09)
=======================================

This release fixes a major bug that stopped servers from handling rooms with
over 1000 members.


Changes in synapse v0.17.2 (2016-09-08)
=======================================

This release contains security bug fixes. Please upgrade.

No changes since v0.17.2-rc1


Changes in synapse v0.17.2-rc1 (2016-09-05)
===========================================

Features:

* Start adding store-and-forward direct-to-device messaging (PR #1046, #1050,
  #1062, #1066)


Changes:

* Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063,
  #1068)
* Don't notify for online-to-online presence transitions (PR #1054)
* Occasionally persist unpersisted presence updates (PR #1055)
* Allow application services to have an optional 'url' (PR #1056)
* Clean up old sent transactions from DB (PR #1059)


Bug fixes:

* Fix None check in backfill (PR #1043)
* Fix membership changes to be idempotent (PR #1067)
* Fix bug in get_pdu where it would sometimes return events with an incorrect
  signature


Changes in synapse v0.17.1 (2016-08-24)
=======================================

Changes:

* Delete old received_transactions rows (PR #1038)
* Pass through user-supplied content in /join/$room_id (PR #1039)


Bug fixes:

* Fix bug with backfill (PR #1040)


Changes in synapse v0.17.1-rc1 (2016-08-22)
===========================================

Features:

* Add notification API (PR #1028)


Changes:

* Don't print stack traces when failing to get remote keys (PR #996)
* Various federation /event/ perf improvements (PR #998)
* Only process one local membership event per room at a time (PR #1005)
* Move default display name push rule (PR #1011, #1023)
* Fix up preview URL API. Add tests. (PR #1015)
* Set ``Content-Security-Policy`` on media repo (PR #1021)
* Make notify_interested_services faster (PR #1022)
* Add usage stats to prometheus monitoring (PR #1037)


Bug fixes:

* Fix token login (PR #993)
* Fix CAS login (PR #994, #995)
* Fix /sync to not clobber status_msg (PR #997)
* Fix redacted state events to include prev_content (PR #1003)
* Fix some bugs in the auth/ldap handler (PR #1007)
* Fix backfill request to limit URI length, so that remotes don't reject the
  requests due to path length limits (PR #1012)
* Fix AS push code to not send duplicate events (PR #1025)


Changes in synapse v0.17.0 (2016-08-08)
=======================================

This release contains significant security bug fixes regarding authenticating
events received over federation. PLEASE UPGRADE.

This release changes the LDAP configuration format in a backwards incompatible
way, see PR #843 for details.


Changes:

* Add federation /version API (PR #990)
* Make psutil dependency optional (PR #992)


Bug fixes:

* Fix URL preview API to exclude HTML comments in description (PR #988)
* Fix error handling of remote joins (PR #991)


Changes in synapse v0.17.0-rc4 (2016-08-05)
===========================================

Changes:

* Change the way we summarize URLs when previewing (PR #973)
* Add new ``/state_ids/`` federation API (PR #979)
* Speed up processing of ``/state/`` response (PR #986)

Bug fixes:

* Fix event persistence when event has already been partially persisted
  (PR #975, #983, #985)
* Fix port script to also copy across backfilled events (PR #982)


Changes in synapse v0.17.0-rc3 (2016-08-02)
===========================================

Changes:

* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
* Add some basic admin API docs (PR #963)


Bug fixes:

* Send the correct host header when fetching keys (PR #941)
* Fix joining a room that has missing auth events (PR #964)
* Fix various push bugs (PR #966, #970)
* Fix adding emails on registration (PR #968)


Changes in synapse v0.17.0-rc2 (2016-08-02)
===========================================

(This release did not include the changes advertised and was identical to RC1)


Changes in synapse v0.17.0-rc1 (2016-07-28)
===========================================

This release changes the LDAP configuration format in a backwards incompatible
way, see PR #843 for details.


Features:

* Add purge_media_cache admin API (PR #902)
* Add deactivate account admin API (PR #903)
* Add optional pepper to password hashing (PR #907, #910 by KentShikama)
* Add an admin option to shared secret registration (breaks backwards compat)
  (PR #909)
* Add purge local room history API (PR #911, #923, #924)
* Add requestToken endpoints (PR #915)
* Add an /account/deactivate endpoint (PR #921)
* Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
* Add device_id support to /login (PR #929); see the login sketch below
* Add device_id support to /v2/register flow (PR #937, #942)
* Add GET /devices endpoint (PR #939, #944)
* Add GET /device/{deviceId} (PR #943)
* Add update and delete APIs for devices (PR #949)
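
A sketch of a password login supplying the new ``device_id`` field (the server
name, credentials, and device ID are placeholders)::

    curl -X POST 'https://my.domain.name/_matrix/client/r0/login' \
         -d '{"type": "m.login.password", "user": "alice",
              "password": "<password>", "device_id": "MYDEVICE"}'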

Changes:

* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
* Linearize some federation endpoints based on (origin, room_id) (PR #879)
* Remove the legacy v0 content upload API (PR #888)
* Use naming similar to what we use in email notifs for push (PR #894)
* Optionally include password hash in createUser endpoint (PR #905 by
  KentShikama)
* Use a query that postgresql optimises better for get_events_around (PR #906)
* Fall back to 'username' if 'user' is not given for appservice registration
  (PR #927 by Half-Shot)
* Add metrics for psutil derived memory usage (PR #936)
* Record device_id in client_ips (PR #938)
* Send the correct host header when fetching keys (PR #941)
* Log the hostname the reCAPTCHA was completed on (PR #946)
* Make the device id on e2e key upload optional (PR #956)
* Add r0.2.0 to the "supported versions" list (PR #960)
* Don't include name of room for invites in push (PR #961)


Bug fixes:

* Fix substitution failure in mail template (PR #887)
* Put most recent 20 messages in email notif (PR #892)
* Ensure that the guest user is in the database when upgrading accounts
  (PR #914)
* Fix various edge cases in auth handling (PR #919)
* Fix 500 ISE when sending alias event without a state_key (PR #925)
* Fix bug where we stored rejections in the state_group; persist all
  rejections (PR #948)
* Fix missing check of whether the user is banned when handling 3pid invites
  (PR #952)
* Fix a couple of bugs in the transaction and keyring code (PR #954, #955)


Changes in synapse v0.16.1-r1 (2016-07-08)
==========================================

MANIFEST.in

@@ -14,6 +14,7 @@ recursive-include docs *
 recursive-include res *
 recursive-include scripts *
 recursive-include scripts-dev *
+recursive-include synapse *.pyi
 recursive-include tests *.py
 
 recursive-include synapse/static *.css
@@ -23,5 +24,7 @@ recursive-include synapse/static *.js
 
 exclude jenkins.sh
 exclude jenkins*.sh
+exclude jenkins*
+recursive-exclude jenkins *.sh
 
 prune demo/etc
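
The new ``recursive-include synapse *.pyi`` line pulls type-stub files into
source distributions. A quick way to confirm they end up in the tarball (a
sketch; run from a checkout)::

    python setup.py sdist
    tar -tzf dist/*.tar.gz | grep '\.pyi$'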

README.rst (613 changed lines)

@@ -11,8 +11,8 @@ VoIP. The basics you need to know to get up and running are:
   like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
 
 - Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
-  you will normally refer to yourself and others using a 3PID: email
-  address, phone number, etc rather than manipulating Matrix user IDs)
+  you will normally refer to yourself and others using a third party identifier
+  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)
 
 The overall architecture is::
 
@@ -20,12 +20,13 @@ The overall architecture is::
       https://somewhere.org/_matrix https://elsewhere.net/_matrix
 
 ``#matrix:matrix.org`` is the official support room for Matrix, and can be
-accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
-bridge at irc://irc.freenode.net/matrix.
+accessed by any client from https://matrix.org/docs/projects/try-matrix-now or
+via IRC bridge at irc://irc.freenode.net/matrix.
 
 Synapse is currently in rapid development, but as of version 0.5 we believe it
 is sufficiently stable to be run as an internet-facing service for real usage!
 
 About Matrix
 ============
 
@@ -52,10 +53,10 @@ generation of fully open and interoperable messaging and VoIP apps for the
 internet.
 
 Synapse is a reference "homeserver" implementation of Matrix from the core
-development team at matrix.org, written in Python/Twisted for clarity and
-simplicity. It is intended to showcase the concept of Matrix and let folks see
-the spec in the context of a codebase and let you run your own homeserver and
-generally help bootstrap the ecosystem.
+development team at matrix.org, written in Python/Twisted. It is intended to
+showcase the concept of Matrix and let folks see the spec in the context of a
+codebase and let you run your own homeserver and generally help bootstrap the
+ecosystem.
 
 In Matrix, every user runs one or more Matrix clients, which connect through to
 a Matrix homeserver. The homeserver stores all their personal chat history and
 
@@ -66,26 +67,16 @@ hosted by someone else (e.g. matrix.org) - there is no single point of control
 or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
 etc.
 
-Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
-web client demo implemented in AngularJS) and cmdclient (a basic Python
-command line utility which lets you easily see what the JSON APIs are up to).
-
-Meanwhile, iOS and Android SDKs and clients are available from:
-
-- https://github.com/matrix-org/matrix-ios-sdk
-- https://github.com/matrix-org/matrix-ios-kit
-- https://github.com/matrix-org/matrix-ios-console
-- https://github.com/matrix-org/matrix-android-sdk
-
 We'd like to invite you to join #matrix:matrix.org (via
-https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
-Matrix spec at https://matrix.org/docs/spec and API docs at
-https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
-report any bugs via https://matrix.org/jira.
+https://matrix.org/docs/projects/try-matrix-now), run a homeserver, take a look
+at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
+`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
+<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
 
 Thanks for using Matrix!
 
-[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
+[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.
 
 
 Synapse Installation
 ====================
 
@@ -95,9 +86,14 @@ Synapse is the reference python/twisted Matrix homeserver implementation.
 System requirements:
 - POSIX-compliant system (tested on Linux & OS X)
 - Python 2.7
-- At least 512 MB RAM.
+- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
 
-Synapse is written in python but some of the libraries is uses are written in
+Installing from source
+----------------------
+(Prebuilt packages are available for some platforms - see `Platform-Specific
+Instructions`_.)
+
+Synapse is written in python but some of the libraries it uses are written in
 C. So before we can install synapse itself we need a working C compiler and the
 header files for python C extensions.
 
@@ -124,6 +120,7 @@ Installing prerequisites on Mac OS X::
     xcode-select --install
     sudo easy_install pip
     sudo pip install virtualenv
+    brew install pkg-config libffi
 
 Installing prerequisites on Raspbian::
 
@@ -134,6 +131,17 @@ Installing prerequisites on Raspbian::
     sudo pip install --upgrade ndg-httpsclient
     sudo pip install --upgrade virtualenv
 
+Installing prerequisites on openSUSE::
+
+    sudo zypper in -t pattern devel_basis
+    sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
+                   python-devel libffi-devel libopenssl-devel libjpeg62-devel
+
+Installing prerequisites on OpenBSD::
+
+    doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
+                 libxslt
+
 To install the synapse homeserver run::
 
     virtualenv -p python2.7 ~/.synapse
 
@@ -145,38 +153,74 @@ This installs synapse, along with the libraries it uses, into a virtual
 environment under ``~/.synapse``. Feel free to pick a different directory
 if you prefer.
 
-In case of problems, please see the _Troubleshooting section below.
+In case of problems, please see the _`Troubleshooting` section below.
 
 Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
 above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
 
-Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
-tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
+Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
+tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
+for details.
 
-To set up your homeserver, run (in your virtualenv, as before)::
+Configuring synapse
+-------------------
+
+Before you can start Synapse, you will need to generate a configuration
+file. To do this, run (in your virtualenv, as before)::
 
     cd ~/.synapse
     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name my.domain.name \
         --config-path homeserver.yaml \
         --generate-config \
         --report-stats=[yes|no]
 
-...substituting your host and domain name as appropriate.
+... substituting an appropriate value for ``--server-name``. The server name
+determines the "domain" part of user-ids for users on your server: these will
+all be of the format ``@user:my.domain.name``. It also determines how other
+matrix servers will reach yours for `Federation`_. For a test configuration,
+set this to the hostname of your server. For a more production-ready setup, you
+will probably want to specify your domain (``example.com``) rather than a
+matrix-specific hostname here (in the same way that your email address is
+probably ``user@example.com`` rather than ``user@email.example.com``) - but
+doing so may require more advanced setup - see `Setting up
+Federation`_. Beware that the server name cannot be changed later.
 
-This will generate you a config file that you can then customise, but it will
+This command will generate you a config file that you can then customise, but it will
 also generate a set of keys for you. These keys will allow your Home Server to
 identify itself to other Home Servers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. If, for whatever reason, you do need to
+wise to back them up somewhere safe. (If, for whatever reason, you do need to
 change your Home Server's keys, you may find that other Home Servers have the
 old key cached. If you update the signing key, you should change the name of the
-key in the <server name>.signing.key file (the second word) to something different.
+key in the ``<server name>.signing.key`` file (the second word) to something
+different. See `the spec`__ for more information on key management.)
 
 By default, registration of new users is disabled. You can either enable
 registration in the config by specifying ``enable_registration: true``
 (it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
|
||||
you can use the command line to register new users::
|
||||
.. __: `key_management`_
|
||||
|
||||
The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
|
||||
configured without TLS; it is not recommended this be exposed outside your
|
||||
local network. Port 8448 is configured to use TLS with a self-signed
|
||||
certificate. This is fine for testing with but, to avoid your clients
|
||||
complaining about the certificate, you will almost certainly want to use
|
||||
another certificate for production purposes. (Note that a self-signed
|
||||
certificate is fine for `Federation`_). You can do so by changing
|
||||
``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
|
||||
in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
|
||||
to read `Using a reverse proxy with Synapse`_ when doing so.
|
||||
|
||||
Apart from port 8448 using TLS, both ports are the same in the default
|
||||
configuration.
|
||||
|
||||
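For illustration, pointing Synapse at your own certificate only requires
changing those three options in ``homeserver.yaml`` - the paths below are
hypothetical, so substitute wherever your certificate material actually
lives::

    tls_certificate_path: "/etc/matrix/ssl/fullchain.pem"
    tls_private_key_path: "/etc/matrix/ssl/privkey.pem"
    tls_dh_params_path: "/etc/matrix/ssl/dh_params.pem"
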
+Registering a user
+------------------
+
+You will need at least one user on your server in order to use a Matrix
+client. Users can be registered either `via a Matrix client`__, or via a
+commandline script.
+
+.. __: `client-user-reg`_
+
+To get started, it is easiest to use the command line to register new users::

     $ source ~/.synapse/bin/activate
     $ synctl start # if not already running
@@ -186,8 +230,19 @@ you can use the command line to register new users::
     Confirm password:
     Success!

+This process uses a setting ``registration_shared_secret`` in
+``homeserver.yaml``, which is shared between Synapse itself and the
+``register_new_matrix_user`` script. It doesn't matter what it is (a random
+value is generated by ``--generate-config``), but it should be kept secret, as
+anyone with knowledge of it can register users on your server even if
+``enable_registration`` is ``false``.

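For the curious, the script authenticates by sending an HMAC of the new user's
details, keyed with that shared secret. The following Python sketch shows the
general shape of the scheme; it is illustrative only and the exact fields can
differ between Synapse versions, so consult the ``register_new_matrix_user``
script itself for the authoritative logic::

    # Illustrative sketch of shared-secret registration (Python 2 era).
    import hmac
    from hashlib import sha1

    def registration_mac(shared_secret, user, password, admin=False):
        # The MAC covers the username, password and admin flag, separated
        # by NUL bytes, keyed with registration_shared_secret from
        # homeserver.yaml.
        mac = hmac.new(key=shared_secret, digestmod=sha1)
        mac.update(user)
        mac.update("\x00")
        mac.update(password)
        mac.update("\x00")
        mac.update("admin" if admin else "notadmin")
        return mac.hexdigest()
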
 Setting up a TURN server
 ------------------------

 For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server. See docs/turn-howto.rst for details.
+a TURN server. See `<docs/turn-howto.rst>`_ for details.


 Running Synapse
 ===============
@@ -199,29 +254,66 @@ run (e.g. ``~/.synapse``), and::

     source ./bin/activate
     synctl start

-Using PostgreSQL
-================
-
-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
-
-The advantages of Postgres include:
-
-* significant performance improvements due to the superior threading and
-  caching model, smarter query optimiser
-* allowing the DB to be run on separate hardware
-* allowing basic active/backup high-availability with a "hot spare" synapse
-  pointing at the same DB master, as well as enabling DB replication in
-  synapse itself.
-
-The only disadvantage is that the code is relatively new as of April 2015 and
-may have a few regressions relative to SQLite.
-
-For information on how to install and use PostgreSQL, please see
-`docs/postgres.rst <docs/postgres.rst>`_.
-
-Platform Specific Instructions
+Connecting to Synapse from a client
+===================================
+
+The easiest way to try out your new Synapse installation is by connecting to it
+from a web client. The easiest option is probably the one at
+http://riot.im/app. You will need to specify a "Custom server" when you log on
+or register: set this to ``https://localhost:8448`` - remember to specify the
+port (``:8448``) unless you changed the configuration. (Leave the identity
+server as the default - see `Identity servers`_.)
+
+If all goes well you should at least be able to log in, create a room, and
+start sending messages.
+
+(The homeserver runs a web client by default at https://localhost:8448/, though
+as of the time of writing it is somewhat outdated and not really recommended -
+https://github.com/matrix-org/synapse/issues/1527).
+
+.. _`client-user-reg`:
+
+Registering a new user from a client
+------------------------------------
+
+By default, registration of new users via Matrix clients is disabled. To enable
+it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
+recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)
+
+Once ``enable_registration`` is set to ``true``, it is possible to register a
+user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
+
+Your new user name will be formed partly from the ``server_name`` (see
+`Configuring synapse`_), and partly from a localpart you specify when you
+create the account. Your name will take the form of::
+
+    @localpart:my.domain.name
+
+(pronounced "at localpart on my dot domain dot name").
+
+As when logging in, you will need to specify a "Custom server". Specify your
+desired ``localpart`` in the 'User name' box.


 Security Note
 =============

 Matrix serves raw user generated data in some APIs - specifically the `content
 repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.

 Whilst we have tried to mitigate against possible XSS attacks (e.g.
 https://github.com/matrix-org/synapse/pull/1021) we recommend running
 matrix homeservers on a dedicated domain name, to limit any malicious user
 generated content served to web browsers via a matrix API from being able to
 attack webapps hosted on the same domain. This is particularly true of sharing
 a matrix webclient and server on the same domain.

 See https://github.com/vector-im/vector-web/issues/1977 and
 https://developer.github.com/changes/2014-04-25-user-content-security for more details.


+Platform-Specific Instructions
 ==============================

 Debian
@@ -229,7 +321,7 @@ Debian

 Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
 Note that these packages do not include a client - choose one from
-https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)
+https://matrix.org/docs/projects/try-matrix-now/ (or build your own with one of our SDKs :)

 Fedora
 ------
@@ -283,6 +375,32 @@ Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

 - Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
 - Packages: ``pkg install py27-matrix-synapse``


+OpenBSD
+-------
+
+There is currently no port for OpenBSD. Additionally, OpenBSD's security
+settings require a slightly more difficult installation process.
+
+1) Create a new directory in ``/usr/local`` called ``_synapse``. Also, create a
+   new user called ``_synapse`` and set that directory as the new user's home.
+   This is required because, by default, OpenBSD only allows binaries which need
+   write and execute permissions on the same memory space to be run from
+   ``/usr/local``.
+2) ``su`` to the new ``_synapse`` user and change to their home directory.
+3) Create a new virtualenv: ``virtualenv -p python2.7 ~/.synapse``
+4) Source the virtualenv configuration located at
+   ``/usr/local/_synapse/.synapse/bin/activate``. This is done in ``ksh`` by
+   using the ``.`` command, rather than ``bash``'s ``source``.
+5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
+   webpages for their titles.
+6) Use ``pip`` to install this repository: ``pip install
+   https://github.com/matrix-org/synapse/tarball/master``
+7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
+   chance of a compromised Synapse server being used to take over your box.
+
+After this, you may proceed with the rest of the install directions.

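Condensed into a shell transcript, the numbered steps above might look
something like the following sketch (illustrative only - adjust user names and
paths to taste, and note that OpenBSD command details vary between releases)::

    doas mkdir /usr/local/_synapse
    doas useradd -d /usr/local/_synapse _synapse
    doas chown _synapse /usr/local/_synapse
    doas su - _synapse
    virtualenv -p python2.7 ~/.synapse
    . ~/.synapse/bin/activate
    pip install lxml
    pip install https://github.com/matrix-org/synapse/tarball/master
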
 NixOS
 -----

@@ -322,6 +440,7 @@ Troubleshooting:
 you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
 it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``

+
 Troubleshooting
 ===============

@@ -395,37 +514,6 @@ you will need to explicitly call Python2.7 - either running as::

 ...or by editing synctl with the correct python executable.

-Synapse Development
-===================
-
-To check out a synapse for development, clone the git repo into a working
-directory of your choice::
-
-    git clone https://github.com/matrix-org/synapse.git
-    cd synapse
-
-Synapse has a number of external dependencies, that are easiest
-to install using pip and a virtualenv::
-
-    virtualenv env
-    source env/bin/activate
-    python synapse/python_dependencies.py | xargs -n1 pip install
-    pip install setuptools_trial mock
-
-This will run a process of downloading and installing all the needed
-dependencies into a virtual env.
-
-Once this is done, you may wish to run Synapse's unit tests, to
-check that everything is installed as it should be::
-
-    python setup.py test
-
-This should end with a 'PASSED' result::
-
-    Ran 143 tests in 0.601s
-
-    PASSED (successes=143)
-

 Upgrading an existing Synapse
 =============================
@@ -436,140 +524,248 @@ versions of synapse.

 .. _UPGRADE.rst: UPGRADE.rst

+.. _federation:
+
 Setting up Federation
 =====================

-In order for other homeservers to send messages to your server, it will need to
-be publicly visible on the internet, and they will need to know its host name.
-You have two choices here, which will influence the form of your Matrix user
-IDs:
+Federation is the process by which users on different servers can participate
+in the same room. For this to work, those other servers must be able to contact
+yours to send messages.

-1) Use the machine's own hostname as available on public DNS in the form of
-   its A or AAAA records. This is easier to set up initially, perhaps for
-   testing, but lacks the flexibility of SRV.
+As explained in `Configuring synapse`_, the ``server_name`` in your
+``homeserver.yaml`` file determines the way that other servers will reach
+yours. By default, they will treat it as a hostname and try to connect to
+port 8448. This is easy to set up and will work with the default configuration,
+provided you set the ``server_name`` to match your machine's public DNS
+hostname.

-2) Set up a SRV record for your domain name. This requires you create a SRV
-   record in DNS, but gives the flexibility to run the server on your own
-   choice of TCP port, on a machine that might not be the same name as the
-   domain name.
+For a more flexible configuration, you can set up a DNS SRV record. This allows
+you to run your server on a machine that might not have the same name as your
+domain name. For example, you might want to run your server at
+``synapse.example.com``, but have your Matrix user-ids look like
+``@user:example.com``. (A SRV record also allows you to change the port from
+the default 8448. However, if you are thinking of using a reverse-proxy, be
+sure to read `Reverse-proxying the federation port`_ first.)

-For the first form, simply pass the required hostname (of the machine) as the
---server-name parameter::
+To use a SRV record, first create your SRV record and publish it in DNS. This
+should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
+<synapse.server.name>``. The DNS record should then look something like::
+
+    $ dig -t srv _matrix._tcp.example.com
+    _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
+
+You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
+its user-ids, by setting ``server_name``::

     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name <yourdomain.com> \
         --config-path homeserver.yaml \
         --generate-config
     python -m synapse.app.homeserver --config-path homeserver.yaml

 Alternatively, you can run ``synctl start`` to guide you through the process.

-For the second form, first create your SRV record and publish it in DNS. This
-needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
-and port where the server is running. (At the current time synapse does not
-support clustering multiple servers into a single logical homeserver). The DNS
-record would then look something like::
-
-    $ dig -t srv _matrix._tcp.machine.my.domain.name
-    _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
-
-At this point, you should then run the homeserver with the hostname of this
-SRV record, as that is the name other machines will expect it to have::
-
-    python -m synapse.app.homeserver \
-        --server-name YOURDOMAIN \
-        --config-path homeserver.yaml \
-        --generate-config
-    python -m synapse.app.homeserver --config-path homeserver.yaml
-
-If you've already generated the config file, you need to edit the "server_name"
-in you ```homeserver.yaml``` file. If you've already started Synapse and a
+If you've already generated the config file, you need to edit the ``server_name``
+in your ``homeserver.yaml`` file. If you've already started Synapse and a
 database has been created, you will have to recreate the database.

-You may additionally want to pass one or more "-v" options, in order to
-increase the verbosity of logging output; at least for initial testing.
+If all goes well, you should be able to `connect to your server with a client`__,
+and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
+step. "Matrix HQ"'s sheer size and activity level tends to make even the
+largest boxes pause for thought.)
+
+.. __: `Connecting to Synapse from a client`_
+
+Troubleshooting
+---------------
+
+The typical failure mode with federation is that when you try to join a room,
+it is rejected with "401: Unauthorized". Generally this means that other
+servers in the room couldn't access yours. (Joining a room over federation is a
+complicated dance which requires connections in both directions).
+
+So, things to check are:
+
+* If you are trying to use a reverse-proxy, read `Reverse-proxying the
+  federation port`_.
+* If you are not using a SRV record, check that your ``server_name`` (the part
+  of your user-id after the ``:``) matches your hostname, and that port 8448 on
+  that hostname is reachable from outside your network.
+* If you *are* using a SRV record, check that it matches your ``server_name``
+  (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
+  it specifies are reachable from outside your network.

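As a quick sanity check of both points, the following commands can be useful
(the hostnames here are placeholders - substitute your own)::

    # Does the SRV record resolve?
    dig +short -t srv _matrix._tcp.example.com

    # Is the federation port reachable from outside, and does TLS come up?
    openssl s_client -connect synapse.example.com:8448 </dev/null
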
 Running a Demo Federation of Synapses
 -------------------------------------

 If you want to get up and running quickly with a trio of homeservers in a
-private federation (``localhost:8080``, ``localhost:8081`` and
-``localhost:8082``) which you can then access through the webclient running at
-http://localhost:8080. Simply run::
-
-    demo/start.sh
-
-This is mainly useful just for development purposes.
-
-Running The Demo Web Client
-===========================
-
-The homeserver runs a web client by default at https://localhost:8448/.
-
-If this is the first time you have used the client from that browser (it uses
-HTML5 local storage to remember its config), you will need to log in to your
-account. If you don't yet have an account, because you've just started the
-homeserver for the first time, then you'll need to register one.
-
-Registering A New Account
--------------------------
-
-Your new user name will be formed partly from the hostname your server is
-running as, and partly from a localpart you specify when you create the
-account. Your name will take the form of::
-
-    @localpart:my.domain.here
-
-(pronounced "at localpart on my dot domain dot here")
-
-Specify your desired localpart in the topmost box of the "Register for an
-account" form, and click the "Register" button. Hostnames can contain ports if
-required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
-internal synapse sandbox running on localhost).
-
-If registration fails, you may need to enable it in the homeserver (see
-`Synapse Installation`_ above)
-
-Logging In To An Existing Account
----------------------------------
-
-Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
-the form and click the Login button.
+private federation, there is a script in the ``demo`` directory. This is mainly
+useful just for development purposes. See `<demo/README>`_.
+
+
+Using PostgreSQL
+================
+
+As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+traditionally used for convenience and simplicity.
+
+The advantages of Postgres include:
+
+* significant performance improvements due to the superior threading and
+  caching model, smarter query optimiser
+* allowing the DB to be run on separate hardware
+* allowing basic active/backup high-availability with a "hot spare" synapse
+  pointing at the same DB master, as well as enabling DB replication in
+  synapse itself.
+
+For information on how to install and use PostgreSQL, please see
+`docs/postgres.rst <docs/postgres.rst>`_.
+
+
+.. _reverse-proxy:
+
+Using a reverse proxy with Synapse
+==================================
+
+It is possible to put a reverse proxy such as
+`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
+`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
+doing so is that it means that you can expose the default https port (443) to
+Matrix clients without needing to run Synapse with root privileges.
+
+The most important thing to know here is that Matrix clients and other Matrix
+servers do not necessarily need to connect to your server via the same
+port. Indeed, clients will use port 443 by default, whereas servers default to
+port 8448. Where these are different, we refer to the 'client port' and the
+'federation port'.
+
+The next most important thing to know is that using a reverse-proxy on the
+federation port has a number of pitfalls. It is possible, but be sure to read
+`Reverse-proxying the federation port`_.
+
+The recommended setup is therefore to configure your reverse-proxy on port 443
+for client connections, but to also expose port 8448 for server-server
+connections. All the Matrix endpoints begin ``/_matrix``, so an example nginx
+configuration might look like::
+
+    server {
+        listen 443 ssl;
+        listen [::]:443 ssl;
+        server_name matrix.example.com;
+
+        location /_matrix {
+            proxy_pass http://localhost:8008;
+            proxy_set_header X-Forwarded-For $remote_addr;
+        }
+    }
+
+You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
+for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
+recorded correctly.

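If you prefer Apache, an equivalent virtual host might look something like the
following sketch (illustrative only - it assumes ``mod_proxy_http`` is enabled
and that your SSL certificates are configured elsewhere in the usual way)::

    <VirtualHost *:443>
        ServerName matrix.example.com
        SSLEngine on
        ProxyPreserveHost on
        # Forward all Matrix endpoints to Synapse's client port.
        ProxyPass /_matrix http://localhost:8008/_matrix
        ProxyPassReverse /_matrix http://localhost:8008/_matrix
    </VirtualHost>
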
 Having done so, you can then use ``https://matrix.example.com`` (instead of
 ``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
 Synapse from a client`_.

+Reverse-proxying the federation port
+------------------------------------
+
+There are two issues to consider before using a reverse-proxy on the federation
+port:
+
+* Due to the way SSL certificates are managed in the Matrix federation protocol
+  (see `spec`__), Synapse needs to be configured with the path to the SSL
+  certificate, *even if you do not terminate SSL at Synapse*.
+
+  .. __: `key_management`_
+
+* Synapse does not currently support SNI on the federation protocol
+  (`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_), which
+  means that using name-based virtual hosting is unreliable.
+
+Furthermore, a number of the normal reasons for using a reverse-proxy do not
+apply:
+
+* Other servers will connect on port 8448 by default, so there is no need to
+  listen on port 443 (for federation, at least), which avoids the need for root
+  privileges and virtual hosting.
+
+* A self-signed SSL certificate is fine for federation, so there is no need to
+  automate renewals. (The certificate generated by ``--generate-config`` is
+  valid for 10 years.)
+
+If you want to set up a reverse-proxy on the federation port despite these
+caveats, you will need to do the following:
+
+* In ``homeserver.yaml``, set ``tls_certificate_path`` to the path to the SSL
+  certificate file used by your reverse-proxy, and set ``no_tls`` to ``True``.
+  (``tls_private_key_path`` will be ignored if ``no_tls`` is ``True``.)
+
+* In your reverse-proxy configuration:
+
+  * If there are other virtual hosts on the same port, make sure that the
+    *default* one uses the certificate configured above.
+
+  * Forward ``/_matrix`` to Synapse.
+
+* If your reverse-proxy is not listening on port 8448, publish a SRV record to
+  tell other servers how to find you. See `Setting up Federation`_.
+
+When updating the SSL certificate, just update the file pointed to by
+``tls_certificate_path``: there is no need to restart synapse. (You may like to
+use a symbolic link to help make this process atomic.)
+
+The most common mistake when setting up federation is not to tell Synapse about
+your SSL certificate. To check it, you can visit
+``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``.
+Unfortunately, there is no UI for this yet, but you should see
+``"MatchingTLSFingerprint": true``. If not, check that
+``Certificates[0].SHA256Fingerprint`` (the fingerprint of the certificate
+presented by your reverse-proxy) matches ``Keys.tls_fingerprints[0].sha256``
+(the fingerprint of the certificate Synapse is using).

 Identity Servers
 ================

-The job of authenticating 3PIDs and tracking which 3PIDs are associated with a
-given Matrix user is very security-sensitive, as there is obvious risk of spam
-if it is too easy to sign up for Matrix accounts or harvest 3PID data.
-Meanwhile the job of publishing the end-to-end encryption public keys for
-Matrix users is also very security-sensitive for similar reasons.
+Identity servers have the job of mapping email addresses and other 3rd Party
+IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
+before creating that mapping.

-Therefore the role of managing trusted identity in the Matrix ecosystem is
-farmed out to a cluster of known trusted ecosystem partners, who run 'Matrix
-Identity Servers' such as ``sydent``, whose role is purely to authenticate and
-track 3PID logins and publish end-user public keys.
+**They are not where accounts or credentials are stored - these live on home
+servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**

-It's currently early days for identity servers as Matrix is not yet using 3PIDs
-as the primary means of identity and E2E encryption is not complete. As such,
-we are running a single identity server (https://matrix.org) at the current
-time.
+This process is very security-sensitive, as there is obvious risk of spam if it
+is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
+term, we hope to create a decentralised system to manage it (`matrix-doc #712
+<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
+the role of managing trusted identity in the Matrix ecosystem is farmed out to
+a cluster of known trusted ecosystem partners, who run 'Matrix Identity
+Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
+is purely to authenticate and track 3PID logins and publish end-user public
+keys.
+
+You can host your own copy of Sydent, but this will prevent you reaching other
+users in the Matrix ecosystem via their email address, and prevent them finding
+you. We therefore recommend that you use one of the centralised identity servers
+at ``https://matrix.org`` or ``https://vector.im`` for now.
+
+To reiterate: the Identity server will only be used if you choose to associate
+an email address with your account, or send an invite to another user via their
+email address.


 URL Previews
 ============

-Synapse 0.15.0 introduces an experimental new API for previewing URLs at
-/_matrix/media/r0/preview_url. This is disabled by default. To turn it on
-you must enable the `url_preview_enabled: True` config parameter and explicitly
-specify the IP ranges that Synapse is not allowed to spider for previewing in
-the `url_preview_ip_range_blacklist` configuration parameter. This is critical
-from a security perspective to stop arbitrary Matrix users spidering 'internal'
-URLs on your network. At the very least we recommend that your loopback and
-RFC1918 IP addresses are blacklisted.
+Synapse 0.15.0 introduces a new API for previewing URLs at
+``/_matrix/media/r0/preview_url``. This is disabled by default. To turn it on
+you must enable the ``url_preview_enabled: True`` config parameter and
+explicitly specify the IP ranges that Synapse is not allowed to spider for
+previewing in the ``url_preview_ip_range_blacklist`` configuration parameter.
+This is critical from a security perspective to stop arbitrary Matrix users
+spidering 'internal' URLs on your network. At the very least we recommend that
+your loopback and RFC1918 IP addresses are blacklisted.

 This also requires the optional lxml and netaddr python dependencies to be
 installed.
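For illustration, a minimal configuration enabling the feature might look like
this (the blacklist below covers only loopback and the standard RFC1918 ranges -
extend it to match your own network)::

    url_preview_enabled: True
    url_preview_ip_range_blacklist:
      - '127.0.0.0/8'
      - '10.0.0.0/8'
      - '172.16.0.0/12'
      - '192.168.0.0/16'
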
@@ -583,24 +779,54 @@ server, they can request a password-reset token via clients such as Vector.

 A manual password reset can be done via direct database access as follows.

-First calculate the hash of the new password:
+First calculate the hash of the new password::

     $ source ~/.synapse/bin/activate
     $ ./scripts/hash_password
-    Password:
-    Confirm password:
+    Password:
+    Confirm password:
+    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

-Then update the `users` table in the database:
+Then update the `users` table in the database::

     UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
         WHERE name='@test:test.com';

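With the default SQLite database, the same update can be issued straight from
the shell - the database path below is the default, so adjust it if yours
lives elsewhere (the dollar signs are escaped to stop the shell expanding
them)::

    sqlite3 ~/.synapse/homeserver.db \
        "UPDATE users SET password_hash='\$2a\$12\$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' WHERE name='@test:test.com';"
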
-Where's the spec?!
-==================
-
-The source of the matrix spec lives at https://github.com/matrix-org/matrix-doc.
-A recent HTML snapshot of this lives at http://matrix.org/docs/spec
+Synapse Development
+===================
+
+Before setting up a development environment for synapse, make sure you have the
+system dependencies (such as the python header files) installed - see
+`Installing from source`_.
+
+To check out a synapse for development, clone the git repo into a working
+directory of your choice::
+
+    git clone https://github.com/matrix-org/synapse.git
+    cd synapse
+
+Synapse has a number of external dependencies, that are easiest
+to install using pip and a virtualenv::
+
+    virtualenv env
+    source env/bin/activate
+    python synapse/python_dependencies.py | xargs pip install
+    pip install lxml mock
+
+This will run a process of downloading and installing all the needed
+dependencies into a virtual env.
+
+Once this is done, you may wish to run Synapse's unit tests, to
+check that everything is installed as it should be::
+
+    PYTHONPATH="." trial tests
+
+This should end with a 'PASSED' result::
+
+    Ran 143 tests in 0.601s
+
+    PASSED (successes=143)


 Building Internal API Documentation
@@ -617,7 +843,6 @@ Building internal API documentation::

     python setup.py build_sphinx

-

 Help!! Synapse eats all my RAM!
 ===============================

@@ -633,3 +858,5 @@ around a ~700MB footprint. You can dial it down further to 0.02 if
 desired, which targets roughly ~512MB. Conversely you can dial it up if
 you need performance for lots of users and have a box with a lot of RAM.

+
+.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys

@@ -27,7 +27,7 @@ running:

     # Pull the latest version of the master branch.
     git pull
     # Update the versions of synapse's python dependencies.
-    python synapse/python_dependencies.py | xargs -n1 pip install
+    python synapse/python_dependencies.py | xargs -n1 pip install --upgrade


 Upgrading to v0.15.0

@@ -10,13 +10,13 @@ https://developers.google.com/recaptcha/

 Setting ReCaptcha Keys
 ----------------------
-The keys are a config option on the home server config. If they are not
-visible, you can generate them via --generate-config. Set the following value:
+The keys are a config option on the home server config. If they are not
+visible, you can generate them via --generate-config. Set the following value::

     recaptcha_public_key: YOUR_PUBLIC_KEY
     recaptcha_private_key: YOUR_PRIVATE_KEY

-In addition, you MUST enable captchas via:
+In addition, you MUST enable captchas via::

     enable_registration_captcha: true

@@ -25,7 +25,6 @@ Configuring IP used for auth

 The ReCaptcha API requires that the IP address of the user who solved the
 captcha is sent. If the client is connecting through a proxy or load balancer,
 it may be required to use the X-Forwarded-For (XFF) header instead of the origin
-IP address. This can be configured as an option on the home server like so:
+IP address. This can be configured as an option on the home server like so::

     captcha_ip_origin_is_x_forwarded: true

12
docs/admin_api/README.rst
Normal file
@@ -0,0 +1,12 @@
+Admin APIs
+==========
+
+This directory includes documentation for the various synapse specific admin
+APIs available.
+
+Only users that are server admins can use these APIs. A user can be marked as a
+server admin by updating the database directly, e.g.:
+
+``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
+
+Restarting may be required for the changes to register.
15
docs/admin_api/purge_history_api.rst
Normal file
@@ -0,0 +1,15 @@
+Purge History API
+=================
+
+The purge history API allows server admins to purge historic events from their
+database, reclaiming disk space.
+
+Depending on the amount of history being purged a call to the API may take
+several minutes or longer. During this period users will not be able to
+paginate further back in the room from the point being purged from.
+
+The API is simply:
+
+``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
+
+including an ``access_token`` of a server admin.
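An illustrative invocation with curl might look as follows - the room ID,
event ID and access token are placeholders, the room ID must be URL-encoded,
and ``-k`` is only needed while the default self-signed certificate is in
use::

    curl -k -X POST -d '{}' \
        'https://localhost:8448/_matrix/client/r0/admin/purge_history/%21roomid%3Amy.domain.name/%24eventid?access_token=ACCESS_TOKEN'
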
17
docs/admin_api/purge_remote_media.rst
Normal file
@@ -0,0 +1,17 @@
+Purge Remote Media API
+======================
+
+The purge remote media API allows server admins to purge old cached remote
+media.
+
+The API is::
+
+    POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
+
+    {}
+
+Which will remove all cached media that was last accessed before
+``<unix_timestamp_in_ms>``.
+
+If the user re-requests purged remote media, synapse will re-request the media
+from the originating server.
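For example, to purge remote media last accessed before 1 January 2017 UTC
(1483228800000 in milliseconds; the token is a placeholder and ``-k`` only
matters with the default self-signed certificate)::

    curl -k -X POST -d '{}' \
        'https://localhost:8448/_matrix/client/r0/admin/purge_media_cache?before_ts=1483228800000&access_token=ACCESS_TOKEN'
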
@@ -15,36 +15,45 @@ How to monitor Synapse metrics using Prometheus

    Restart synapse

-3: Check out synapse-prometheus-config
-   https://github.com/matrix-org/synapse-prometheus-config
+3: Add a prometheus target for synapse. It needs to set the ``metrics_path``
+   to a non-default value::

-4: Add ``synapse.html`` and ``synapse.rules``
-   The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
-   and the ``.rules`` file needs to be invoked somewhere in the main config
-   file. A symlink to each from the git checkout into the prometheus directory
-   might be easiest to ensure ``git pull`` keeps it updated.
+    - job_name: "synapse"
+      metrics_path: "/_synapse/metrics"
+      static_configs:
+        - targets: ["my.server.here:9092"]

-5: Add a prometheus target for synapse
-   This is easiest if prometheus runs on the same machine as synapse, as it can
-   then just use localhost::
+Standard Metric Names
+---------------------

-    global: {
-      rule_file: "synapse.rules"
-    }
+As of synapse version 0.18.2, the format of the process-wide metrics has been
+changed to fit prometheus standard naming conventions. Additionally the units
+have been changed to seconds, from milliseconds.

-    job: {
-      name: "synapse"
+================================== =============================
+New name                           Old name
+---------------------------------- -----------------------------
+process_cpu_user_seconds_total     process_resource_utime / 1000
+process_cpu_system_seconds_total   process_resource_stime / 1000
+process_open_fds (no 'type' label) process_fds
+================================== =============================

-    target_group: {
-      target: "http://localhost:9092/"
-    }
-    }
+The python-specific counts of garbage collector performance have been renamed.

-6: Start prometheus::
+=========================== ======================
+New name                    Old name
+--------------------------- ----------------------
+python_gc_time              reactor_gc_time
+python_gc_unreachable_total reactor_gc_unreachable
+python_gc_counts            reactor_gc_counts
+=========================== ======================

-    ./prometheus -config.file=prometheus.conf
+The twisted-specific reactor metrics have been renamed.

-7: Wait a few seconds for it to start and perform the first scrape,
-   then visit the console:
-
-   http://server-where-prometheus-runs:9090/consoles/synapse.html
+==================================== =====================
+New name                             Old name
+------------------------------------ ---------------------
+python_twisted_reactor_pending_calls reactor_pending_calls
+python_twisted_reactor_tick_time     reactor_tick_time
+==================================== =====================

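Once the renamed metrics are being scraped, queries against them look like the
following sketch (illustrative PromQL; the job name matches the scrape config
above)::

    # Fraction of a CPU spent in userland by synapse, averaged over 5 minutes:
    rate(process_cpu_user_seconds_total{job="synapse"}[5m])

    # Currently open file descriptors:
    process_open_fds{job="synapse"}
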
98
docs/workers.rst
Normal file
@@ -0,0 +1,98 @@
+Scaling synapse via workers
+---------------------------
+
+Synapse has experimental support for splitting out functionality into
+multiple separate python processes, helping greatly with scalability. These
+processes are called 'workers', and are (eventually) intended to scale
+horizontally independently.
+
+All processes continue to share the same database instance, and as such, workers
+only work with postgres based synapse deployments (sharing a single sqlite
+across multiple processes is a recipe for disaster, plus you should be using
+postgres anyway if you care about scalability).
+
+The workers communicate with the master synapse process via a synapse-specific
+HTTP protocol called 'replication' - analogous to MySQL or Postgres style
+database replication; feeding a stream of relevant data to the workers so they
+can be kept in sync with the main synapse process and database state.
+
+To enable workers, you need to add a replication listener to the master synapse, e.g.::
+
+    listeners:
+      - port: 9092
+        bind_address: '127.0.0.1'
+        type: http
+        tls: false
+        x_forwarded: false
+        resources:
+          - names: [replication]
+            compress: false
+
+Under **no circumstances** should this replication API listener be exposed to the
+public internet; it currently implements no authentication whatsoever and is
+unencrypted HTTP.
+
+You then create a set of configs for the various worker processes. These
+worker configuration files should be stored in a dedicated subdirectory, to allow
+synctl to manipulate them.
+
+The currently available worker applications are:
+
+* synapse.app.pusher - handles sending push notifications to sygnal and email
+* synapse.app.synchrotron - handles /sync endpoints. Can scale horizontally through multiple instances.
+* synapse.app.appservice - handles output traffic to Application Services
+* synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
+* synapse.app.media_repository - handles the media repository.
+* synapse.app.client_reader - handles client API endpoints like /publicRooms
+
+Each worker configuration file inherits the configuration of the main homeserver
+configuration file. You can then override configuration specific to that worker,
+e.g. the HTTP listener that it provides (if any); logging configuration; etc.
+You should minimise the number of overrides though to maintain a usable config.
+
+You must specify the type of worker application (``worker_app``) and the replication
+endpoint that it's talking to on the main synapse process (``worker_replication_url``).
+
+For instance::
+
+    worker_app: synapse.app.synchrotron
+
+    # The replication listener on the synapse to talk to.
+    worker_replication_url: http://127.0.0.1:9092/_synapse/replication
+
+    worker_listeners:
+      - type: http
+        port: 8083
+        resources:
+          - names:
+              - client
+
+    worker_daemonize: True
+    worker_pid_file: /home/matrix/synapse/synchrotron.pid
+    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
+
+...is a full configuration for a synchrotron worker instance, which will expose a
+plain HTTP ``/sync`` endpoint on port 8083 separately from the ``/sync`` endpoint
+provided by the main synapse.
+
+Obviously you should configure your loadbalancer to route ``/sync`` requests to
+the synchrotron instance(s) in such a setup.

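As an illustration, an nginx rule along the following lines could route sync
traffic to such a worker (the port is the one from the example above; the
paths assume the r0/v2_alpha client API prefixes of this era)::

    # Send /sync to the synchrotron; everything else under /_matrix
    # continues to go to the main synapse process.
    location ~ ^/_matrix/client/(v2_alpha|r0)/sync$ {
        proxy_pass http://localhost:8083;
    }
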
Finally, to actually run your worker-based synapse, you must pass synctl the -a
|
||||
commandline option to tell it to operate on all the worker configurations found
|
||||
in the given directory, e.g.::
|
||||
|
||||
synctl -a $CONFIG/workers start
|
||||
|
||||
Currently one should always restart all workers when restarting or upgrading
|
||||
synapse, unless you explicitly know it's safe not to. For instance, restarting
|
||||
synapse without restarting all the synchrotrons may result in broken typing
|
||||
notifications.
|
||||
|
||||
To manipulate a specific worker, you pass the -w option to synctl::
|
||||
|
||||
synctl -w $CONFIG/workers/synchrotron.yaml restart
|
||||
|
||||
All of the above is highly experimental and subject to change as Synapse evolves,
|
||||
but documenting it here to help folks needing highly scalable Synapses similar
|
||||
to the one running matrix.org!
|
||||
|
||||
@@ -4,84 +4,22 @@ set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export WORKSPACE
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
./jenkins/prepare_synapse.sh
|
||||
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
|
||||
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
|
||||
./dendron/jenkins/build_dendron.sh
|
||||
./sytest/jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decided whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install psycopg2
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .dendron-base ]]; then
|
||||
git clone https://github.com/matrix-org/dendron.git .dendron-base --mirror
|
||||
else
|
||||
(cd .dendron-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf dendron
|
||||
git clone .dendron-base dendron --shared
|
||||
cd dendron
|
||||
|
||||
: ${GOPATH:=${WORKSPACE}/.gopath}
|
||||
if [[ "${GOPATH}" != *:* ]]; then
|
||||
mkdir -p "${GOPATH}"
|
||||
export PATH="${GOPATH}/bin:${PATH}"
|
||||
fi
|
||||
export GOPATH
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
go get github.com/constabulary/gb/...
|
||||
gb generate
|
||||
gb build
|
||||
|
||||
cd ..
|
||||
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_BASE:=8000}
|
||||
: ${PORT_COUNT=20}
|
||||
|
||||
./jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
mkdir -p var
|
||||
|
||||
echo >&2 "Running sytest with PostgreSQL";
|
||||
./jenkins/install_and_run.sh --python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--dendron $WORKSPACE/dendron/bin/dendron \
|
||||
--pusher \
|
||||
--synchrotron \
|
||||
--port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1))
|
||||
|
||||
cd ..
|
||||
./sytest/jenkins/install_and_run.sh \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--dendron $WORKSPACE/dendron/bin/dendron \
|
||||
--pusher \
|
||||
--synchrotron \
|
||||
--federation-reader \
|
||||
--client-reader \
|
||||
--appservice \
|
||||
--federation-sender \
|
||||
|
||||
@@ -4,61 +4,14 @@ set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export WORKSPACE
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
./jenkins/prepare_synapse.sh
|
||||
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decided whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
./sytest/jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install psycopg2
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_BASE:=8000}
|
||||
: ${PORT_COUNT=20}
|
||||
|
||||
./jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
echo >&2 "Running sytest with PostgreSQL";
|
||||
./jenkins/install_and_run.sh --coverage \
|
||||
--python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \
|
||||
|
||||
cd ..
|
||||
cp sytest/.coverage.* .
|
||||
|
||||
# Combine the coverage reports
|
||||
echo "Combining:" .coverage.*
|
||||
$TOX_BIN/python -m coverage combine
|
||||
# Output coverage to coverage.xml
|
||||
$TOX_BIN/coverage xml -o coverage.xml
|
||||
./sytest/jenkins/install_and_run.sh \
|
||||
--synapse-directory $WORKSPACE \
|
||||
|
||||
@@ -4,55 +4,12 @@ set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export WORKSPACE
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
./jenkins/prepare_synapse.sh
|
||||
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decided whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_COUNT=20}
|
||||
: ${PORT_BASE:=8000}
|
||||
./jenkins/install_and_run.sh --coverage \
|
||||
--python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \
|
||||
|
||||
cd ..
|
||||
cp sytest/.coverage.* .
|
||||
|
||||
# Combine the coverage reports
|
||||
echo "Combining:" .coverage.*
|
||||
$TOX_BIN/python -m coverage combine
|
||||
# Output coverage to coverage.xml
|
||||
$TOX_BIN/coverage xml -o coverage.xml
|
||||
./sytest/jenkins/install_and_run.sh \
|
||||
--synapse-directory $WORKSPACE \
|
||||
|
||||
@@ -22,4 +22,9 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
tox -e py27
|
||||
|
||||
44
jenkins/clone.sh
Executable file
44
jenkins/clone.sh
Executable file
@@ -0,0 +1,44 @@
|
||||
#! /bin/bash
|
||||
|
||||
# This clones a project from github into a named subdirectory
|
||||
# If the project has a branch with the same name as this branch
|
||||
# then it will checkout that branch after cloning.
|
||||
# Otherwise it will checkout "origin/develop."
|
||||
# The first argument is the name of the directory to checkout
|
||||
# the branch into.
|
||||
# The second argument is the URL of the remote repository to checkout.
|
||||
# Usually something like https://github.com/matrix-org/sytest.git
|
||||
|
||||
set -eux
|
||||
|
||||
NAME=$1
|
||||
PROJECT=$2
|
||||
BASE=".$NAME-base"
|
||||
|
||||
# Update our mirror.
|
||||
if [ ! -d ".$NAME-base" ]; then
|
||||
# Create a local mirror of the source repository.
|
||||
# This saves us from having to download the entire repository
|
||||
# when this script is next run.
|
||||
git clone "$PROJECT" "$BASE" --mirror
|
||||
else
|
||||
# Fetch any updates from the source repository.
|
||||
(cd "$BASE"; git fetch -p)
|
||||
fi
|
||||
|
||||
# Remove the existing repository so that we have a clean copy
|
||||
rm -rf "$NAME"
|
||||
# Cloning with --shared means that we will share portions of the
|
||||
# .git directory with our local mirror.
|
||||
git clone "$BASE" "$NAME" --shared
|
||||
|
||||
# Jenkins may have supplied us with the name of the branch in the
|
||||
# environment. Otherwise we will have to guess based on the current
|
||||
# commit.
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
cd "$NAME"
|
||||
# check out the relevant branch
|
||||
git checkout "${GIT_BRANCH}" || (
|
||||
echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
|
||||
git checkout "origin/develop"
|
||||
)
|
||||
20
jenkins/prepare_synapse.sh
Executable file
20
jenkins/prepare_synapse.sh
Executable file
@@ -0,0 +1,20 @@
#! /bin/bash

cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox

mkdir -p $TOX_DIR

if ! [ $TOX_DIR -ef .tox ]; then
  ln -s "$TOX_DIR" .tox
fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin
$TOX_BIN/pip install setuptools
{ python synapse/python_dependencies.py
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install
@@ -18,7 +18,9 @@
<div class="summarytext">{{ summary_text }}</div>
</td>
<td class="logo">
{% if app_name == "Vector" %}
{% if app_name == "Riot" %}
<img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{% elif app_name == "Vector" %}
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{% else %}
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>

@@ -116,17 +116,19 @@ def get_json(origin_name, origin_key, destination, path):
    authorization_headers = []

    for key, sig in signed_json["signatures"][origin_name].items():
        authorization_headers.append(bytes(
            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
                origin_name, key, sig,
            )
        ))
        header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
            origin_name, key, sig,
        )
        authorization_headers.append(bytes(header))
        sys.stderr.write(header)
        sys.stderr.write("\n")

    result = requests.get(
        lookup(destination, path),
        headers={"Authorization": authorization_headers[0]},
        verify=False,
    )
    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
    return result.json()

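For reference, the Authorization header built above takes the following shape; the values here are placeholders, not a real origin, key id, or signature:

header = 'X-Matrix origin=%s,key="%s",sig="%s"' % (
    "example.org",          # origin_name (placeholder)
    "ed25519:a_key",        # signing key id (placeholder)
    "bm90LWEtcmVhbC1zaWc",  # base64 signature (placeholder)
)
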
@@ -141,6 +143,7 @@ def main():
    )

    json.dump(result, sys.stdout)
    print ""


if __name__ == "__main__":
    main()

@@ -34,11 +34,12 @@ logger = logging.getLogger("synapse_port_db")


BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier"],
    "events": ["processed", "outlier", "contains_url"],
    "rooms": ["is_public"],
    "event_edges": ["is_state"],
    "presence_list": ["accepted"],
    "presence_stream": ["currently_active"],
    "public_room_list_stream": ["visibility"],
}
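BOOLEAN_COLUMNS tells the port script which columns hold 0/1 integers in SQLite but must become real booleans on the Postgres side. A minimal sketch of how such a mapping could be applied per row; the helper name here is illustrative, not the script's actual code:

def convert_booleans(table, headers, row):
    # Columns named in BOOLEAN_COLUMNS are coerced with bool(); everything
    # else passes through unchanged.
    bool_cols = BOOLEAN_COLUMNS.get(table, [])
    return [
        bool(value) if header in bool_cols else value
        for header, value in zip(headers, row)
    ]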


@@ -71,6 +72,14 @@ APPEND_ONLY_TABLES = [
    "event_to_state_groups",
    "rejections",
    "event_search",
    "presence_stream",
    "push_rules_stream",
    "current_state_resets",
    "ex_outlier_stream",
    "cache_invalidation_stream",
    "public_room_list_stream",
    "state_group_edges",
    "stream_ordering_to_exterm",
]

@@ -92,8 +101,12 @@ class Store(object):

    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
    _simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
    _simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
    _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__[
        "_simple_select_one_onecol_txn"
    ]

    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
@@ -158,31 +171,40 @@ class Porter(object):
    def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            next_chunk = yield self.postgres_store._simple_select_one_onecol(
            row = yield self.postgres_store._simple_select_one(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcol="rowid",
                retcols=("forward_rowid", "backward_rowid"),
                allow_none=True,
            )

            total_to_port = None
            if next_chunk is None:
            if row is None:
                if table == "sent_transactions":
                    next_chunk, already_ported, total_to_port = (
                    forward_chunk, already_ported, total_to_port = (
                        yield self._setup_sent_transactions()
                    )
                    backward_chunk = 0
                else:
                    yield self.postgres_store._simple_insert(
                        table="port_from_sqlite3",
                        values={"table_name": table, "rowid": 1}
                        values={
                            "table_name": table,
                            "forward_rowid": 1,
                            "backward_rowid": 0,
                        }
                    )

                    next_chunk = 1
                    forward_chunk = 1
                    backward_chunk = 0
                    already_ported = 0
            else:
                forward_chunk = row["forward_rowid"]
                backward_chunk = row["backward_rowid"]

            if total_to_port is None:
                already_ported, total_to_port = yield self._get_total_count_to_port(
                    table, next_chunk
                    table, forward_chunk, backward_chunk
                )
        else:
            def delete_all(txn):
@@ -196,46 +218,85 @@ class Porter(object):

            yield self.postgres_store._simple_insert(
                table="port_from_sqlite3",
                values={"table_name": table, "rowid": 0}
                values={
                    "table_name": table,
                    "forward_rowid": 1,
                    "backward_rowid": 0,
                }
            )

            next_chunk = 1
            forward_chunk = 1
            backward_chunk = 0

            already_ported, total_to_port = yield self._get_total_count_to_port(
                table, next_chunk
                table, forward_chunk, backward_chunk
            )

        defer.returnValue((table, already_ported, total_to_port, next_chunk))
        defer.returnValue(
            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
        )

    @defer.inlineCallbacks
    def handle_table(self, table, postgres_size, table_size, next_chunk):
    def handle_table(self, table, postgres_size, table_size, forward_chunk,
                     backward_chunk):
        if not table_size:
            return

        self.progress.add_table(table, postgres_size, table_size)

        if table == "event_search":
            yield self.handle_search_table(postgres_size, table_size, next_chunk)
            yield self.handle_search_table(
                postgres_size, table_size, forward_chunk, backward_chunk
            )
            return

        select = (
        forward_select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        backward_select = (
            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        do_forward = [True]
        do_backward = [True]

        while True:
            def r(txn):
                txn.execute(select, (next_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]
                forward_rows = []
                backward_rows = []
                if do_forward[0]:
                    txn.execute(forward_select, (forward_chunk, self.batch_size,))
                    forward_rows = txn.fetchall()
                    if not forward_rows:
                        do_forward[0] = False

                return headers, rows
                if do_backward[0]:
                    txn.execute(backward_select, (backward_chunk, self.batch_size,))
                    backward_rows = txn.fetchall()
                    if not backward_rows:
                        do_backward[0] = False

            headers, rows = yield self.sqlite_store.runInteraction("select", r)
                if forward_rows or backward_rows:
                    headers = [column[0] for column in txn.description]
                else:
                    headers = None

            if rows:
                next_chunk = rows[-1][0] + 1
                return headers, forward_rows, backward_rows

            headers, frows, brows = yield self.sqlite_store.runInteraction(
                "select", r
            )

            if frows or brows:
                if frows:
                    forward_chunk = max(row[0] for row in frows) + 1
                if brows:
                    backward_chunk = min(row[0] for row in brows) - 1

                rows = frows + brows
                self._convert_rows(table, headers, rows)

                def insert(txn):
@@ -247,7 +308,10 @@ class Porter(object):
                    txn,
                    table="port_from_sqlite3",
                    keyvalues={"table_name": table},
                    updatevalues={"rowid": next_chunk},
                    updatevalues={
                        "forward_rowid": forward_chunk,
                        "backward_rowid": backward_chunk,
                    },
                )

            yield self.postgres_store.execute(insert)
@@ -259,7 +323,8 @@ class Porter(object):
            return

    @defer.inlineCallbacks
    def handle_search_table(self, postgres_size, table_size, next_chunk):
    def handle_search_table(self, postgres_size, table_size, forward_chunk,
                            backward_chunk):
        select = (
            "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
            " FROM event_search as es"
@@ -270,7 +335,7 @@ class Porter(object):

        while True:
            def r(txn):
                txn.execute(select, (next_chunk, self.batch_size,))
                txn.execute(select, (forward_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]

@@ -279,7 +344,7 @@ class Porter(object):
            headers, rows = yield self.sqlite_store.runInteraction("select", r)

            if rows:
                next_chunk = rows[-1][0] + 1
                forward_chunk = rows[-1][0] + 1

                # We have to treat event_search differently since it has a
                # different structure in the two different databases.
@@ -312,7 +377,10 @@ class Porter(object):
                    txn,
                    table="port_from_sqlite3",
                    keyvalues={"table_name": "event_search"},
                    updatevalues={"rowid": next_chunk},
                    updatevalues={
                        "forward_rowid": forward_chunk,
                        "backward_rowid": backward_chunk,
                    },
                )

            yield self.postgres_store.execute(insert)
@@ -324,7 +392,6 @@ class Porter(object):
            else:
                return


    def setup_db(self, db_config, database_engine):
        db_conn = database_engine.module.connect(
            **{
@@ -395,10 +462,32 @@ class Porter(object):
            txn.execute(
                "CREATE TABLE port_from_sqlite3 ("
                " table_name varchar(100) NOT NULL UNIQUE,"
                " rowid bigint NOT NULL"
                " forward_rowid bigint NOT NULL,"
                " backward_rowid bigint NOT NULL"
                ")"
            )

        # The old port script created a table with just a "rowid" column.
        # We want people to be able to rerun this script from an old port
        # so that they can pick up any missing events that were not
        # ported across.
        def alter_table(txn):
            txn.execute(
                "ALTER TABLE IF EXISTS port_from_sqlite3"
                " RENAME rowid TO forward_rowid"
            )
            txn.execute(
                "ALTER TABLE IF EXISTS port_from_sqlite3"
                " ADD backward_rowid bigint NOT NULL DEFAULT 0"
            )

        try:
            yield self.postgres_store.runInteraction(
                "alter_table", alter_table
            )
        except Exception as e:
            logger.info("Failed to create port table: %s", e)

        try:
            yield self.postgres_store.runInteraction(
                "create_port_table", create_port_table
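The forward_rowid/backward_rowid pair is what lets a rerun of the port pick up both new rows (above the previously saved position) and older rows that the old single-rowid script never copied (below it). A minimal sketch of the two-directional walk; the function names are illustrative, not the script's actual helpers:

def port_chunks(fetch_forward, fetch_backward, forward_rowid=1,
                backward_rowid=0, batch_size=100):
    do_forward, do_backward = True, True
    while do_forward or do_backward:
        frows, brows = [], []
        if do_forward:
            frows = fetch_forward(forward_rowid, batch_size)
            if frows:
                forward_rowid = max(r[0] for r in frows) + 1  # past highest seen
            else:
                do_forward = False
        if do_backward:
            brows = fetch_backward(backward_rowid, batch_size)
            if brows:
                backward_rowid = min(r[0] for r in brows) - 1  # below lowest seen
            else:
                do_backward = False
        yield frows + brows  # caller inserts these and persists both positions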
@@ -458,7 +547,7 @@ class Porter(object):
    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
        # Only save things from the last day
        yesterday = int(time.time()*1000) - 86400000
        yesterday = int(time.time() * 1000) - 86400000

        # And save the max transaction id from each destination
        select = (
@@ -514,7 +603,11 @@ class Porter(object):

        yield self.postgres_store._simple_insert(
            table="port_from_sqlite3",
            values={"table_name": "sent_transactions", "rowid": next_chunk}
            values={
                "table_name": "sent_transactions",
                "forward_rowid": next_chunk,
                "backward_rowid": 0,
            }
        )

        def get_sent_table_size(txn):
@@ -535,13 +628,18 @@ class Porter(object):
        defer.returnValue((next_chunk, inserted_rows, total_count))

    @defer.inlineCallbacks
    def _get_remaining_count_to_port(self, table, next_chunk):
        rows = yield self.sqlite_store.execute_sql(
    def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
        frows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
            next_chunk,
            forward_chunk,
        )

        defer.returnValue(rows[0][0])
        brows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
            backward_chunk,
        )

        defer.returnValue(frows[0][0] + brows[0][0])

    @defer.inlineCallbacks
    def _get_already_ported_count(self, table):
@@ -552,10 +650,10 @@ class Porter(object):
        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_total_count_to_port(self, table, next_chunk):
    def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
        remaining, done = yield defer.gatherResults(
            [
                self._get_remaining_count_to_port(table, next_chunk),
                self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
                self._get_already_ported_count(table),
            ],
            consumeErrors=True,
@@ -686,7 +784,7 @@ class CursesProgress(Progress):
        color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)

        self.stdscr.addstr(
            i+2, left_margin + max_len - len(table),
            i + 2, left_margin + max_len - len(table),
            table,
            curses.A_BOLD | color,
        )
@@ -694,18 +792,18 @@ class CursesProgress(Progress):
        size = 20

        progress = "[%s%s]" % (
            "#" * int(perc*size/100),
            " " * (size - int(perc*size/100)),
            "#" * int(perc * size / 100),
            " " * (size - int(perc * size / 100)),
        )

        self.stdscr.addstr(
            i+2, left_margin + max_len + middle_space,
            i + 2, left_margin + max_len + middle_space,
            "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
        )

        if self.finished:
            self.stdscr.addstr(
                rows-1, 0,
                rows - 1, 0,
                "Press any key to exit...",
            )

@@ -16,7 +16,5 @@ ignore =

[flake8]
max-line-length = 90
ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.

[pep8]
max-line-length = 90
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
ignore = W503

setup.py (73 lines changed)
@@ -23,6 +23,45 @@ import sys

here = os.path.abspath(os.path.dirname(__file__))


# Some notes on `setup.py test`:
#
# Once upon a time we used to try to make `setup.py test` run `tox` to run the
# tests. That's a bad idea for three reasons:
#
# 1: `setup.py test` is supposed to find out whether the tests work in the
#    *current* environment, not whatever tox sets up.
# 2: Empirically, trying to install tox during the test run wasn't working ("No
#    module named virtualenv").
# 3: The tox documentation advises against it[1].
#
# Even further back in time, we used to use setuptools_trial [2]. That has its
# own set of issues: for instance, it requires installation of Twisted to build
# an sdist (because the recommended mode of usage is to add it to
# `setup_requires`). That in turn means that in order to successfully run tox
# you have to have the python header files installed for whichever version of
# python tox uses (which is python3 on recent ubuntus, for example).
#
# So, for now at least, we stick with what appears to be the convention among
# Twisted projects, and don't attempt to do anything when someone runs
# `setup.py test`; instead we direct people to run `trial` directly if they
# care.
#
# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
# [2]: https://pypi.python.org/pypi/setuptools_trial
class TestCommand(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print ("""Synapse's tests cannot be run via setup.py. To run them, try:
    PYTHONPATH="." trial tests
""")

def read_file(path_segments):
    """Read a file from the package. Takes a list of strings to join to
    make the path"""
@@ -39,38 +78,6 @@ def exec_file(path_segments):
    return result


class Tox(Command):
    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]

    def initialize_options(self):
        self.tox_args = None

    def finalize_options(self):
        self.test_args = []
        self.test_suite = True

    def run(self):
        # import here, cause outside the eggs aren't loaded
        try:
            import tox
        except ImportError:
            try:
                self.distribution.fetch_build_eggs("tox")
                import tox
            except:
                raise RuntimeError(
                    "The tests need 'tox' to run. Please install 'tox'."
                )
        import shlex
        args = self.tox_args
        if args:
            args = shlex.split(self.tox_args)
        else:
            args = []
        errno = tox.cmdline(args=args)
        sys.exit(errno)


version = exec_file(("synapse", "__init__.py"))["__version__"]
dependencies = exec_file(("synapse", "python_dependencies.py"))
long_description = read_file(("README.rst",))
@@ -86,5 +93,5 @@ setup(
    zip_safe=False,
    long_description=long_description,
    scripts=["synctl"] + glob.glob("scripts/*"),
    cmdclass={'test': Tox},
    cmdclass={'test': TestCommand},
)

@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""

__version__ = "0.16.1-r1"
__version__ = "0.19.0-rc3"

(File diff suppressed because it is too large.)
@@ -85,3 +85,8 @@ class RoomCreationPreset(object):
    PRIVATE_CHAT = "private_chat"
    PUBLIC_CHAT = "public_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"


class ThirdPartyEntityKind(object):
    USER = "user"
    LOCATION = "location"

@@ -39,10 +39,12 @@ class Codes(object):
    CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
    CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
    MISSING_PARAM = "M_MISSING_PARAM"
    INVALID_PARAM = "M_INVALID_PARAM"
    TOO_LARGE = "M_TOO_LARGE"
    EXCLUSIVE = "M_EXCLUSIVE"
    THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
    THREEPID_IN_USE = "M_THREEPID_IN_USE"
    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
    INVALID_USERNAME = "M_INVALID_USERNAME"
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


@@ -71,6 +71,21 @@ class Filtering(object):
            if key in user_filter_json["room"]:
                self._check_definition(user_filter_json["room"][key])

        if "event_fields" in user_filter_json:
            if type(user_filter_json["event_fields"]) != list:
                raise SynapseError(400, "event_fields must be a list of strings")
            for field in user_filter_json["event_fields"]:
                if not isinstance(field, basestring):
                    raise SynapseError(400, "Event field must be a string")
                # Don't allow '\\' in event field filters. This makes matching
                # events a lot easier as we can then use a negative lookbehind
                # assertion to split on '\.'. If we allowed '\\' then it would
                # incorrectly split '\\.'. See synapse.events.utils.serialize_event.
                if r'\\' in field:
                    raise SynapseError(
                        400, r'The escape character \ cannot itself be escaped'
                    )

    def _check_definition_room_lists(self, definition):
        """Check that "rooms" and "not_rooms" are lists of room ids if they
        are present
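A minimal illustration of the negative-lookbehind split that the comment above describes; the sample field path is made up:

import re

# Unescaped dots separate keys; the escaped dot in 'm\.relates_to' stays
# inside a single key.
re.split(r"(?<!\\)\.", r"content.m\.relates_to")
# -> ['content', 'm\\.relates_to']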
@@ -152,6 +167,7 @@ class FilterCollection(object):
        self.include_leave = filter_json.get("room", {}).get(
            "include_leave", False
        )
        self.event_fields = filter_json.get("event_fields", [])

    def __repr__(self):
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
@@ -186,11 +202,51 @@ class FilterCollection(object):
    def filter_room_account_data(self, events):
        return self._room_account_data.filter(self._room_filter.filter(events))

    def blocks_all_presence(self):
        return (
            self._presence_filter.filters_all_types() or
            self._presence_filter.filters_all_senders()
        )

    def blocks_all_room_ephemeral(self):
        return (
            self._room_ephemeral_filter.filters_all_types() or
            self._room_ephemeral_filter.filters_all_senders() or
            self._room_ephemeral_filter.filters_all_rooms()
        )

    def blocks_all_room_timeline(self):
        return (
            self._room_timeline_filter.filters_all_types() or
            self._room_timeline_filter.filters_all_senders() or
            self._room_timeline_filter.filters_all_rooms()
        )


class Filter(object):
    def __init__(self, filter_json):
        self.filter_json = filter_json

        self.types = self.filter_json.get("types", None)
        self.not_types = self.filter_json.get("not_types", [])

        self.rooms = self.filter_json.get("rooms", None)
        self.not_rooms = self.filter_json.get("not_rooms", [])

        self.senders = self.filter_json.get("senders", None)
        self.not_senders = self.filter_json.get("not_senders", [])

        self.contains_url = self.filter_json.get("contains_url", None)

    def filters_all_types(self):
        return "*" in self.not_types

    def filters_all_senders(self):
        return "*" in self.not_senders

    def filters_all_rooms(self):
        return "*" in self.not_rooms

    def check(self, event):
        """Checks whether the filter matches the given event.

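For illustration, given the accessors above, a sync filter whose presence section excludes every type makes blocks_all_presence() return True:

filter_json = {
    "presence": {"not_types": ["*"]},
}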
@@ -209,9 +265,10 @@ class Filter(object):
            event.get("room_id", None),
            sender,
            event.get("type", None),
            "url" in event.get("content", {})
        )

    def check_fields(self, room_id, sender, event_type):
    def check_fields(self, room_id, sender, event_type, contains_url):
        """Checks whether the filter matches the given event fields.

        Returns:
@@ -225,15 +282,20 @@ class Filter(object):

        for name, match_func in literal_keys.items():
            not_name = "not_%s" % (name,)
            disallowed_values = self.filter_json.get(not_name, [])
            disallowed_values = getattr(self, not_name)
            if any(map(match_func, disallowed_values)):
                return False

            allowed_values = self.filter_json.get(name, None)
            allowed_values = getattr(self, name)
            if allowed_values is not None:
                if not any(map(match_func, allowed_values)):
                    return False

        contains_url_filter = self.filter_json.get("contains_url")
        if contains_url_filter is not None:
            if contains_url_filter != contains_url:
                return False

        return True

    def filter_rooms(self, room_ids):

@@ -23,7 +23,7 @@ class Ratelimiter(object):
    def __init__(self):
        self.message_counts = collections.OrderedDict()

    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
        """Can the user send a message?
        Args:
            user_id: The user sending a message.
@@ -32,12 +32,15 @@ class Ratelimiter(object):
                second.
            burst_count: How many messages the user can send before being
                limited.
            update (bool): Whether to update the message rates or not. This is
                useful for checking whether a message would be allowed before
                it is actually sent.
        Returns:
            A pair of a bool indicating if they can send a message now and a
            time in seconds of when they can next send a message.
        """
        self.prune_message_counts(time_now_s)
        message_count, time_start, _ignored = self.message_counts.pop(
        message_count, time_start, _ignored = self.message_counts.get(
            user_id, (0., time_now_s, None),
        )
        time_delta = time_now_s - time_start
@@ -52,9 +55,10 @@ class Ratelimiter(object):
            allowed = True
            message_count += 1

            self.message_counts[user_id] = (
                message_count, time_start, msg_rate_hz
            )
            if update:
                self.message_counts[user_id] = (
                    message_count, time_start, msg_rate_hz
                )

        if msg_rate_hz > 0:
            time_allowed = (
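A sketch of how the new update flag can be used: dry-run the check first, and record the message against the user's rate only once it has actually been sent. The limiter and deliver names here are illustrative:

allowed, next_allowed_ts = limiter.send_message(
    user_id, time_now_s=now, msg_rate_hz=0.2, burst_count=10.0, update=False,
)
if allowed:
    deliver(message)  # hypothetical delivery step
    # Now commit the send to the limiter's bookkeeping.
    limiter.send_message(user_id, now, 0.2, 10.0, update=True)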
@@ -25,4 +25,3 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"

@@ -16,13 +16,11 @@
import sys
sys.dont_write_bytecode = True

from synapse.python_dependencies import (
    check_requirements, MissingRequirementError
)  # NOQA
from synapse import python_dependencies  # noqa: E402

try:
    check_requirements()
except MissingRequirementError as e:
    python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
    message = "\n".join([
        "Missing Requirement: %s" % (e.message,),
        "To install run:",

synapse/app/appservice.py (new file, 220 lines)
@@ -0,0 +1,220 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.server import HomeServer
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
from synapse import events
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.appservice")
|
||||
|
||||
|
||||
class AppserviceSlaveStore(
|
||||
DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class AppserviceServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse appservice now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
appservice_handler = self.get_application_service_handler()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(results):
|
||||
stream = results.get("events")
|
||||
if stream:
|
||||
max_stream_id = stream["position"]
|
||||
yield appservice_handler.notify_interested_services(max_stream_id)
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
replicate(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(30)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse appservice", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.appservice"
|
||||
|
||||
setup_logging(config.worker_log_config, config.worker_log_file)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
if config.notify_appservices:
|
||||
sys.stderr.write(
|
||||
"\nThe appservices must be disabled in the main synapse process"
|
||||
"\nbefore they can be run in a separate worker."
|
||||
"\nPlease add ``notify_appservices: false`` to the main config"
|
||||
"\n"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Force the appservice notifications to start, since they will be disabled in the main config
|
||||
config.notify_appservices = True
|
||||
|
||||
ps = AppserviceServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ps.setup()
|
||||
ps.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
with LoggingContext("run"):
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ps.replicate()
|
||||
ps.get_datastore().start_profiling()
|
||||
ps.get_state_handler().start_caching()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-appservice",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
synapse/app/client_reader.py (new file, 226 lines)
@@ -0,0 +1,226 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.rest.client.v1.room import PublicRoomListRestServlet
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.client_ips import ClientIpStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.crypto import context_factory
|
||||
|
||||
from synapse import events
|
||||
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.client_reader")
|
||||
|
||||
|
||||
class ClientReaderSlavedStore(
|
||||
SlavedEventStore,
|
||||
SlavedKeyStore,
|
||||
RoomStore,
|
||||
DirectoryStore,
|
||||
SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
BaseSlavedStore,
|
||||
ClientIpStore, # After BaseSlavedStore because the constructor is different
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class ClientReaderServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
elif name == "client":
|
||||
resource = JsonResource(self, canonical_json=False)
|
||||
PublicRoomListRestServlet(self).register(resource)
|
||||
resources.update({
|
||||
"/_matrix/client/r0": resource,
|
||||
"/_matrix/client/unstable": resource,
|
||||
"/_matrix/client/v2_alpha": resource,
|
||||
"/_matrix/client/api/v1": resource,
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse client reader now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(5)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse client reader", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.client_reader"
|
||||
|
||||
setup_logging(config.worker_log_config, config.worker_log_file)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
||||
|
||||
ss = ClientReaderServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
ss.get_handlers()
|
||||
ss.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
with LoggingContext("run"):
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ss.get_state_handler().start_caching()
|
||||
ss.get_datastore().start_profiling()
|
||||
ss.replicate()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-client-reader",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
synapse/app/federation_reader.py (new file, 217 lines)
@@ -0,0 +1,217 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.api.urls import FEDERATION_PREFIX
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
from synapse.crypto import context_factory
|
||||
|
||||
from synapse import events
|
||||
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.federation_reader")
|
||||
|
||||
|
||||
class FederationReaderSlavedStore(
|
||||
SlavedEventStore,
|
||||
SlavedKeyStore,
|
||||
RoomStore,
|
||||
DirectoryStore,
|
||||
TransactionStore,
|
||||
BaseSlavedStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class FederationReaderServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
elif name == "federation":
|
||||
resources.update({
|
||||
FEDERATION_PREFIX: TransportLayerServer(self),
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse federation reader now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(5)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse federation reader", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.federation_reader"
|
||||
|
||||
setup_logging(config.worker_log_config, config.worker_log_file)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
||||
|
||||
ss = FederationReaderServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
ss.get_handlers()
|
||||
ss.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
with LoggingContext("run"):
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ss.get_state_handler().start_caching()
|
||||
ss.get_datastore().start_profiling()
|
||||
ss.replicate()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-federation-reader",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
synapse/app/federation_sender.py (new file, 338 lines)
@@ -0,0 +1,338 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.server import HomeServer
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.federation import send_queue
|
||||
from synapse.federation.units import Edu
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.presence import UserPresenceState
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
from synapse import events
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
import ujson as json
|
||||
|
||||
logger = logging.getLogger("synapse.app.appservice")
|
||||
|
||||
|
||||
class FederationSenderSlaveStore(
|
||||
SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
|
||||
SlavedRegistrationStore, SlavedDeviceStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class FederationSenderServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse federation_sender now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
send_handler = FederationSenderHandler(self)
|
||||
|
||||
send_handler.on_start()
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args.update((yield send_handler.stream_positions()))
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
yield send_handler.process_replication(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(30)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse federation sender", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.federation_sender"
|
||||
|
||||
setup_logging(config.worker_log_config, config.worker_log_file)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
if config.send_federation:
|
||||
sys.stderr.write(
|
||||
"\nThe send_federation must be disabled in the main synapse process"
|
||||
"\nbefore they can be run in a separate worker."
|
||||
"\nPlease add ``send_federation: false`` to the main config"
|
||||
"\n"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Force federation sending to start, since it will be disabled in the main config
|
||||
config.send_federation = True
|
||||
|
||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
||||
|
||||
ps = FederationSenderServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ps.setup()
|
||||
ps.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
with LoggingContext("run"):
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ps.replicate()
|
||||
ps.get_datastore().start_profiling()
|
||||
ps.get_state_handler().start_caching()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-federation-sender",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
class FederationSenderHandler(object):
|
||||
"""Processes the replication stream and forwards the appropriate entries
|
||||
to the federation sender.
|
||||
"""
|
||||
def __init__(self, hs):
|
||||
self.store = hs.get_datastore()
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
|
||||
self._room_serials = {}
|
||||
self._room_typing = {}
|
||||
|
||||
def on_start(self):
|
||||
# There may be some events that are persisted but haven't been sent,
|
||||
# so send them now.
|
||||
self.federation_sender.notify_new_events(
|
||||
self.store.get_room_max_stream_ordering()
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def stream_positions(self):
|
||||
stream_id = yield self.store.get_federation_out_pos("federation")
|
||||
defer.returnValue({
|
||||
"federation": stream_id,
|
||||
|
||||
# Ack stuff we've "processed", this should only be called from
|
||||
# one process.
|
||||
"federation_ack": stream_id,
|
||||
})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_replication(self, result):
|
||||
# The federation stream contains things that we want to send out, e.g.
|
||||
# presence, typing, etc.
|
||||
fed_stream = result.get("federation")
|
||||
if fed_stream:
|
||||
latest_id = int(fed_stream["position"])
|
||||
|
||||
# The federation stream containis a bunch of different types of
|
||||
# rows that need to be handled differently. We parse the rows, put
|
||||
# them into the appropriate collection and then send them off.
|
||||
presence_to_send = {}
|
||||
keyed_edus = {}
|
||||
edus = {}
|
||||
failures = {}
|
||||
device_destinations = set()
|
||||
|
||||
# Parse the rows in the stream
|
||||
for row in fed_stream["rows"]:
|
||||
position, typ, content_js = row
|
||||
content = json.loads(content_js)
|
||||
|
||||
if typ == send_queue.PRESENCE_TYPE:
|
||||
destination = content["destination"]
|
||||
state = UserPresenceState.from_dict(content["state"])
|
||||
|
||||
presence_to_send.setdefault(destination, []).append(state)
|
||||
elif typ == send_queue.KEYED_EDU_TYPE:
|
||||
key = content["key"]
|
||||
edu = Edu(**content["edu"])
|
||||
|
||||
keyed_edus.setdefault(
|
||||
edu.destination, {}
|
||||
)[(edu.destination, tuple(key))] = edu
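                    # Keyed EDUs are deduplicated per (destination, key): a
                    # later row with the same key overwrites the earlier one,
                    # so only the most recent EDU for that key is sent.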
                elif typ == send_queue.EDU_TYPE:
                    edu = Edu(**content)

                    edus.setdefault(edu.destination, []).append(edu)
                elif typ == send_queue.FAILURE_TYPE:
                    destination = content["destination"]
                    failure = content["failure"]

                    failures.setdefault(destination, []).append(failure)
                elif typ == send_queue.DEVICE_MESSAGE_TYPE:
                    device_destinations.add(content["destination"])
                else:
                    raise Exception("Unrecognised federation type: %r" % (typ,))

            # We've finished collecting, send everything off
            for destination, states in presence_to_send.items():
                self.federation_sender.send_presence(destination, states)

            for destination, edu_map in keyed_edus.items():
                for key, edu in edu_map.items():
                    self.federation_sender.send_edu(
                        edu.destination, edu.edu_type, edu.content, key=key,
                    )

            for destination, edu_list in edus.items():
                for edu in edu_list:
                    self.federation_sender.send_edu(
                        edu.destination, edu.edu_type, edu.content, key=None,
                    )

            for destination, failure_list in failures.items():
                for failure in failure_list:
                    self.federation_sender.send_failure(destination, failure)

            for destination in device_destinations:
                self.federation_sender.send_device_messages(destination)

            # Record where we are in the stream.
            yield self.store.update_federation_out_pos(
                "federation", latest_id
            )

        # We also need to poke the federation sender when new events happen
        event_stream = result.get("events")
        if event_stream:
            latest_pos = event_stream["position"]
            self.federation_sender.notify_new_events(latest_pos)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
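# A minimal invocation sketch (not part of this commit; flags illustrative):
# the worker is pointed at the same config as the main homeserver, with
# worker_app set to synapse.app.federation_sender, e.g.
#
#   python -m synapse.app.federation_sender -c homeserver.yaml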
synapse/app/homeserver.py
@@ -51,6 +51,7 @@ from synapse.api.urls import (
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext
from synapse.metrics import register_memory_metrics, get_metrics_for
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer
@@ -106,7 +107,7 @@ def build_resource_for_web_client(hs):
class SynapseHomeServer(HomeServer):
    def _listener_http(self, config, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        bind_addresses = listener_config["bind_addresses"]
        tls = listener_config.get("tls", False)
        site_tag = listener_config.get("tag", port)

@@ -172,29 +173,32 @@ class SynapseHomeServer(HomeServer):
            root_resource = Resource()

        root_resource = create_resource_tree(resources, root_resource)

        if tls:
            reactor.listenSSL(
                port,
                SynapseSite(
                    "synapse.access.https.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                self.tls_server_context_factory,
                interface=bind_address
            )
            for address in bind_addresses:
                reactor.listenSSL(
                    port,
                    SynapseSite(
                        "synapse.access.https.%s" % (site_tag,),
                        site_tag,
                        listener_config,
                        root_resource,
                    ),
                    self.tls_server_context_factory,
                    interface=address
                )
        else:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=bind_address
            )
            for address in bind_addresses:
                reactor.listenTCP(
                    port,
                    SynapseSite(
                        "synapse.access.http.%s" % (site_tag,),
                        site_tag,
                        listener_config,
                        root_resource,
                    ),
                    interface=address
                )
        logger.info("Synapse now listening on port %d", port)

    def start_listening(self):
@@ -204,15 +208,18 @@ class SynapseHomeServer(HomeServer):
            if listener["type"] == "http":
                self._listener_http(config, listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -284,7 +291,7 @@ def setup(config_options):
    # check any extra requirements we have now we have a config
    check_requirements(config)

    version_string = get_version_string("Synapse", synapse)
    version_string = "Synapse/" + get_version_string(synapse)

    logger.info("Server hostname: %s", config.server_name)
    logger.info("Server version: %s", version_string)
@@ -335,6 +342,8 @@ def setup(config_options):
        hs.get_datastore().start_doing_background_updates()
        hs.get_replication_layer().start_get_pdu_cache()

        register_memory_metrics(hs)

    reactor.callWhenRunning(start)

    return hs
@@ -382,6 +391,8 @@ def run(hs):

    start_time = hs.get_clock().time()

    stats = {}

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
@@ -390,7 +401,10 @@ def run(hs):
        if uptime < 0:
            uptime = 0

        stats = {}
        # If the stats dictionary is empty then this is the first time we've
        # reported stats.
        first_time = not stats

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
@@ -403,6 +417,25 @@ def run(hs):
        daily_messages = yield hs.get_datastore().count_daily_messages()
        if daily_messages is not None:
            stats["daily_messages"] = daily_messages
        else:
            stats.pop("daily_messages", None)

        if first_time:
            # Add callbacks to report the synapse stats as metrics whenever
            # prometheus requests them, typically every 30s.
            # As some of the stats are expensive to calculate we only update
            # them when synapse phones home to matrix.org every 24 hours.
            metrics = get_metrics_for("synapse.usage")
            metrics.add_callback("timestamp", lambda: stats["timestamp"])
            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
            metrics.add_callback("total_users", lambda: stats["total_users"])
            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
            metrics.add_callback(
                "daily_active_users", lambda: stats["daily_active_users"]
            )
            metrics.add_callback(
                "daily_messages", lambda: stats.get("daily_messages", 0)
            )

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
synapse/app/media_repository.py (new file, 223 lines)
@@ -0,0 +1,223 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.crypto import context_factory

from synapse import events


from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import gc

logger = logging.getLogger("synapse.app.media_repository")


class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
    MediaRepositoryStore,
    ClientIpStore,
):
    pass


class MediaRepositoryServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "media":
                    media_repo = MediaRepositoryResource(self)
                    resources.update({
                        MEDIA_PREFIX: media_repo,
                        LEGACY_MEDIA_PREFIX: media_repo,
                        CONTENT_REPO_PREFIX: ContentRepoResource(
                            self, self.config.uploads_path
                        ),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse media repository now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse media repository", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.media_repository"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()
        ss.replicate()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-media-repository",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
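# A minimal worker config sketch for this app (illustrative values only; the
# option names match those read by the code above):
#
#   worker_app: synapse.app.media_repository
#   worker_replication_url: http://127.0.0.1:9092/_synapse/replication
#   worker_listeners:
#     - type: http
#       port: 8085
#       bind_addresses: ['127.0.0.1']
#       resources:
#         - names: [media]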
synapse/app/pusher.py
@@ -36,6 +36,8 @@ from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from synapse import events

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

@@ -80,11 +82,6 @@ class PusherSlaveStore(
        DataStore.get_profile_displayname.__func__
    )

    # XXX: This is a bit broken because we don't persist forgotten rooms
    # in a way that they can be streamed. This means that we don't have a
    # way to invalidate the forgotten rooms cache correctly.
    # For now we expire the cache every hour.
    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )
@@ -124,7 +121,7 @@ class PusherServer(HomeServer):

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
@@ -133,16 +130,19 @@ class PusherServer(HomeServer):
                resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse pusher now listening on port %d", port)

    def start_listening(self, listeners):
@@ -150,15 +150,18 @@ class PusherServer(HomeServer):
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -168,7 +171,6 @@ class PusherServer(HomeServer):
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        pusher_pool = self.get_pusherpool()
        clock = self.get_clock()

        def stop_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
@@ -203,7 +205,7 @@ class PusherServer(HomeServer):
                    yield start_pusher(user_id, app_id, pushkey)

            stream = results.get("events")
            if stream:
            if stream and stream["rows"]:
                min_stream_id = stream["rows"][0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_notifications)(
@@ -211,7 +213,7 @@ class PusherServer(HomeServer):
                )

            stream = results.get("receipts")
            if stream:
            if stream and stream["rows"]:
                rows = stream["rows"]
                affected_room_ids = set(row[1] for row in rows)
                min_stream_id = rows[0][0]
@@ -220,21 +222,11 @@ class PusherServer(HomeServer):
                    min_stream_id, max_stream_id, affected_room_ids
                )

        def expire_broken_caches():
            store.who_forgot_in_room.invalidate_all()

        next_expire_broken_caches_ms = 0
        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                now_ms = clock.time_msec()
                if now_ms > next_expire_broken_caches_ms:
                    expire_broken_caches()
                    next_expire_broken_caches_ms = (
                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
                    )
                yield store.process_replication(result)
                poke_pushers(result)
            except:
@@ -255,6 +247,8 @@ def start(config_options):

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    if config.start_pushers:
        sys.stderr.write(
            "\nThe pushers must be disabled in the main synapse process"
@@ -273,7 +267,7 @@ def start(config_options):
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string=get_version_string("Synapse", synapse),
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

@@ -292,6 +286,7 @@ def start(config_options):
        ps.replicate()
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

synapse/app/synchrotron.py
@@ -26,6 +26,9 @@ from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.rest.client.v2_alpha import sync
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
@@ -35,6 +38,9 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
@@ -71,14 +77,12 @@ class SynchrotronSlavedStore(
    SlavedRegistrationStore,
    SlavedFilteringStore,
    SlavedPresenceStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    RoomStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    # XXX: This is a bit broken because we don't persist forgotten rooms
    # in a way that they can be streamed. This means that we don't have a
    # way to invalidate the forgotten rooms cache correctly.
    # For now we expire the cache every hour.
    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )
@@ -89,17 +93,23 @@ class SynchrotronSlavedStore(
    get_presence_list_accepted = PresenceStore.__dict__[
        "get_presence_list_accepted"
    ]
    get_presence_list_observers_accepted = PresenceStore.__dict__[
        "get_presence_list_observers_accepted"
    ]


UPDATE_SYNCING_USERS_MS = 10 * 1000


class SynchrotronPresence(object):
    def __init__(self, hs):
        self.is_mine_id = hs.is_mine_id
        self.http_client = hs.get_simple_http_client()
        self.store = hs.get_datastore()
        self.user_to_num_current_syncs = {}
        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()

        active_presence = self.store.take_presence_startup_info()
        self.user_to_current_state = {
@@ -119,11 +129,13 @@ class SynchrotronPresence(object):

        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)

    def set_state(self, user, state):
    def set_state(self, user, state, ignore_status_msg=False):
        # TODO: How's this supposed to work?
        pass

    get_states = PresenceHandler.get_states.__func__
    get_state = PresenceHandler.get_state.__func__
    _get_interested_parties = PresenceHandler._get_interested_parties.__func__
    current_state_for_users = PresenceHandler.current_state_for_users.__func__

    @defer.inlineCallbacks
@@ -194,19 +206,39 @@ class SynchrotronPresence(object):
            self._need_to_send_sync = False
            yield self._send_syncing_users_now()

    @defer.inlineCallbacks
    def notify_from_replication(self, states, stream_id):
        parties = yield self._get_interested_parties(
            states, calculate_remote_hosts=False
        )
        room_ids_to_states, users_to_states, _ = parties

        self.notifier.on_new_event(
            "presence_key", stream_id, rooms=room_ids_to_states.keys(),
            users=users_to_states.keys()
        )

    @defer.inlineCallbacks
    def process_replication(self, result):
        stream = result.get("presence", {"rows": []})
        states = []
        for row in stream["rows"]:
            (
                position, user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            ) = row
            self.user_to_current_state[user_id] = UserPresenceState(
            state = UserPresenceState(
                user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            )
            self.user_to_current_state[user_id] = state
            states.append(state)

        if states and "position" in stream:
            stream_id = int(stream["position"])
            yield self.notify_from_replication(states, stream_id)


class SynchrotronTyping(object):
@@ -216,6 +248,9 @@ class SynchrotronTyping(object):
        self._room_typing = {}

    def stream_positions(self):
        # We must update this typing token from the response of the previous
        # sync. In particular, the stream id may "reset" back to zero/a low
        # value which we *must* use for the next replication request.
        return {"typing": self._latest_room_serial}

    def process_replication(self, result):
@@ -256,7 +291,7 @@ class SynchrotronServer(HomeServer):

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
@@ -266,23 +301,30 @@ class SynchrotronServer(HomeServer):
            elif name == "client":
                resource = JsonResource(self, canonical_json=False)
                sync.register_servlets(self, resource)
                events.register_servlets(self, resource)
                InitialSyncRestServlet(self).register(resource)
                RoomInitialSyncRestServlet(self).register(resource)
                resources.update({
                    "/_matrix/client/r0": resource,
                    "/_matrix/client/unstable": resource,
                    "/_matrix/client/v2_alpha": resource,
                    "/_matrix/client/api/v1": resource,
                })

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse synchrotron now listening on port %d", port)

    def start_listening(self, listeners):
@@ -290,15 +332,18 @@ class SynchrotronServer(HomeServer):
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -307,15 +352,10 @@ class SynchrotronServer(HomeServer):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        clock = self.get_clock()
        notifier = self.get_notifier()
        presence_handler = self.get_presence_handler()
        typing_handler = self.get_typing_handler()

        def expire_broken_caches():
            store.who_forgot_in_room.invalidate_all()
            store.get_presence_list_accepted.invalidate_all()

        def notify_from_stream(
            result, stream_name, stream_key, room=None, user=None
        ):
@@ -342,6 +382,27 @@ class SynchrotronServer(HomeServer):
                stream_key, position, users=users, rooms=rooms
            )

        @defer.inlineCallbacks
        def notify_device_list_update(result):
            stream = result.get("device_lists")
            if not stream:
                return
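
            # Rows are positional: "field_names" gives the column order for
            # each row, so we look the indices up once rather than hard-coding
            # positions.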
            position_index = stream["field_names"].index("position")
            user_index = stream["field_names"].index("user_id")

            for row in stream["rows"]:
                position = row[position_index]
                user_id = row[user_index]

                rooms = yield store.get_rooms_for_user(user_id)
                room_ids = [r.room_id for r in rooms]

                notifier.on_new_event(
                    "device_list_key", position, rooms=room_ids,
                )

        @defer.inlineCallbacks
        def notify(result):
            stream = result.get("events")
            if stream:
@@ -376,24 +437,21 @@ class SynchrotronServer(HomeServer):
            notify_from_stream(
                result, "typing", "typing_key", room="room_id"
            )
            notify_from_stream(
                result, "to_device", "to_device_key", user="user_id"
            )
            yield notify_device_list_update(result)

        next_expire_broken_caches_ms = 0
        while True:
            try:
                args = store.stream_positions()
                args.update(typing_handler.stream_positions())
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                now_ms = clock.time_msec()
                if now_ms > next_expire_broken_caches_ms:
                    expire_broken_caches()
                    next_expire_broken_caches_ms = (
                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
                    )
                yield store.process_replication(result)
                typing_handler.process_replication(result)
                presence_handler.process_replication(result)
                notify(result)
                yield presence_handler.process_replication(result)
                yield notify(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
@@ -418,13 +476,15 @@ def start(config_options):

    setup_logging(config.worker_log_config, config.worker_log_file)

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    ss = SynchrotronServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string=get_version_string("Synapse", synapse),
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
        application_service_handler=SynchrotronApplicationService(),
    )
@@ -443,6 +503,7 @@ def start(config_options):
    def start():
        ss.get_datastore().start_profiling()
        ss.replicate()
        ss.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

synapse/app/synctl.py
@@ -24,7 +24,7 @@ import subprocess
import sys
import yaml

SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
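# Using sys.executable instead of a bare "python" launches the homeserver
# with the same interpreter that is running synctl (e.g. inside a virtualenv).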

GREEN = "\x1b[1;32m"
RED = "\x1b[1;31m"

synapse/appservice/__init__.py
@@ -14,6 +14,8 @@
# limitations under the License.
from synapse.api.constants import EventTypes

from twisted.internet import defer

import logging
import re

@@ -79,7 +81,7 @@ class ApplicationService(object):
    NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

    def __init__(self, token, url=None, namespaces=None, hs_token=None,
                 sender=None, id=None):
                 sender=None, id=None, protocols=None, rate_limited=True):
        self.token = token
        self.url = url
        self.hs_token = hs_token
@@ -87,6 +89,17 @@ class ApplicationService(object):
        self.namespaces = self._check_namespaces(namespaces)
        self.id = id

        if "|" in self.id:
            raise Exception("application service ID cannot contain '|' character")

        # .protocols is a publicly visible field
        if protocols:
            self.protocols = set(protocols)
        else:
            self.protocols = set()

        self.rate_limited = rate_limited

    def _check_namespaces(self, namespaces):
        # Sanity check that it is of the form:
        # {
@@ -138,65 +151,66 @@ class ApplicationService(object):
                return regex_obj["exclusive"]
        return False

    def _matches_user(self, event, member_list):
        if (hasattr(event, "sender") and
                self.is_interested_in_user(event.sender)):
            return True
    @defer.inlineCallbacks
    def _matches_user(self, event, store):
        if not event:
            defer.returnValue(False)

        if self.is_interested_in_user(event.sender):
            defer.returnValue(True)
        # also check m.room.member state key
        if (hasattr(event, "type") and event.type == EventTypes.Member
                and hasattr(event, "state_key")
                and self.is_interested_in_user(event.state_key)):
            return True
        if (event.type == EventTypes.Member and
                self.is_interested_in_user(event.state_key)):
            defer.returnValue(True)

        if not store:
            defer.returnValue(False)

        member_list = yield store.get_users_in_room(event.room_id)

        # check joined member events
        for user_id in member_list:
            if self.is_interested_in_user(user_id):
                return True
        return False
                defer.returnValue(True)
        defer.returnValue(False)

    def _matches_room_id(self, event):
        if hasattr(event, "room_id"):
            return self.is_interested_in_room(event.room_id)
        return False

    def _matches_aliases(self, event, alias_list):
    @defer.inlineCallbacks
    def _matches_aliases(self, event, store):
        if not store or not event:
            defer.returnValue(False)

        alias_list = yield store.get_aliases_for_room(event.room_id)
        for alias in alias_list:
            if self.is_interested_in_alias(alias):
                return True
        return False
                defer.returnValue(True)
        defer.returnValue(False)

    def is_interested(self, event, restrict_to=None, aliases_for_event=None,
                      member_list=None):
    @defer.inlineCallbacks
    def is_interested(self, event, store=None):
        """Check if this service is interested in this event.

        Args:
            event(Event): The event to check.
            restrict_to(str): The namespace to restrict regex tests to.
            aliases_for_event(list): A list of all the known room aliases for
                this event.
            member_list(list): A list of all joined user_ids in this room.
            store(DataStore)
        Returns:
            bool: True if this service would like to know about this event.
        """
        if aliases_for_event is None:
            aliases_for_event = []
        if member_list is None:
            member_list = []
        # Do cheap checks first
        if self._matches_room_id(event):
            defer.returnValue(True)

        if restrict_to and restrict_to not in ApplicationService.NS_LIST:
            # this is a programming error, so fail early and raise a general
            # exception
            raise Exception("Unexpected restrict_to value: %s". restrict_to)
        if (yield self._matches_aliases(event, store)):
            defer.returnValue(True)

        if not restrict_to:
            return (self._matches_user(event, member_list)
                    or self._matches_aliases(event, aliases_for_event)
                    or self._matches_room_id(event))
        elif restrict_to == ApplicationService.NS_ALIASES:
            return self._matches_aliases(event, aliases_for_event)
        elif restrict_to == ApplicationService.NS_ROOMS:
            return self._matches_room_id(event)
        elif restrict_to == ApplicationService.NS_USERS:
            return self._matches_user(event, member_list)
        if (yield self._matches_user(event, store)):
            defer.returnValue(True)

        defer.returnValue(False)

    def is_interested_in_user(self, user_id):
        return (
@@ -216,11 +230,17 @@ class ApplicationService(object):
            or user_id == self.sender
        )

    def is_interested_in_protocol(self, protocol):
        return protocol in self.protocols

    def is_exclusive_alias(self, alias):
        return self._is_exclusive(ApplicationService.NS_ALIASES, alias)

    def is_exclusive_room(self, room_id):
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def is_rate_limited(self):
        return self.rate_limited

    def __str__(self):
        return "ApplicationService: %s" % (self.__dict__,)

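# A minimal usage sketch (hypothetical caller, not part of this commit):
# is_interested() now returns a Deferred and takes the datastore, so a caller
# in an inlineCallbacks context would do something like:
#
#   interested = yield app_service.is_interested(event, store=store)
#   if interested:
#       schedule_for_appservice(app_service, event)  # hypothetical helper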
synapse/appservice/api.py
@@ -14,9 +14,12 @@
# limitations under the License.
from twisted.internet import defer

from synapse.api.constants import ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.events.utils import serialize_event
from synapse.util.caches.response_cache import ResponseCache
from synapse.types import ThirdPartyInstanceID

import logging
import urllib
@@ -24,6 +27,42 @@ import urllib
logger = logging.getLogger(__name__)


HOUR_IN_MS = 60 * 60 * 1000


APP_SERVICE_PREFIX = "/_matrix/app/unstable"


def _is_valid_3pe_metadata(info):
    if "instances" not in info:
        return False
    if not isinstance(info["instances"], list):
        return False
    return True


def _is_valid_3pe_result(r, field):
    if not isinstance(r, dict):
        return False

    for k in (field, "protocol"):
        if k not in r:
            return False
        if not isinstance(r[k], str):
            return False

    if "fields" not in r:
        return False
    fields = r["fields"]
    if not isinstance(fields, dict):
        return False
    for k in fields.keys():
        if not isinstance(fields[k], str):
            return False

    return True


class ApplicationServiceApi(SimpleHttpClient):
    """This class manages HS -> AS communications, including querying and
    pushing.
@@ -33,8 +72,12 @@ class ApplicationServiceApi(SimpleHttpClient):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)

    @defer.inlineCallbacks
    def query_user(self, service, user_id):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/users/%s" % urllib.quote(user_id))
        response = None
        try:
@@ -54,6 +97,8 @@ class ApplicationServiceApi(SimpleHttpClient):

    @defer.inlineCallbacks
    def query_alias(self, service, alias):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
        response = None
        try:
@@ -71,8 +116,91 @@ class ApplicationServiceApi(SimpleHttpClient):
            logger.warning("query_alias to %s threw exception %s", uri, ex)
            defer.returnValue(False)

    @defer.inlineCallbacks
    def query_3pe(self, service, kind, protocol, fields):
        if kind == ThirdPartyEntityKind.USER:
            required_field = "userid"
        elif kind == ThirdPartyEntityKind.LOCATION:
            required_field = "alias"
        else:
            raise ValueError(
                "Unrecognised 'kind' argument %r to query_3pe()", kind
            )
        if service.url is None:
            defer.returnValue([])

        uri = "%s%s/thirdparty/%s/%s" % (
            service.url,
            APP_SERVICE_PREFIX,
            kind,
            urllib.quote(protocol)
        )
        try:
            response = yield self.get_json(uri, fields)
            if not isinstance(response, list):
                logger.warning(
                    "query_3pe to %s returned an invalid response %r",
                    uri, response
                )
                defer.returnValue([])

            ret = []
            for r in response:
                if _is_valid_3pe_result(r, field=required_field):
                    ret.append(r)
                else:
                    logger.warning(
                        "query_3pe to %s returned an invalid result %r",
                        uri, r
                    )

            defer.returnValue(ret)
        except Exception as ex:
            logger.warning("query_3pe to %s threw exception %s", uri, ex)
            defer.returnValue([])

    def get_3pe_protocol(self, service, protocol):
        if service.url is None:
            defer.returnValue({})

        @defer.inlineCallbacks
        def _get():
            uri = "%s%s/thirdparty/protocol/%s" % (
                service.url,
                APP_SERVICE_PREFIX,
                urllib.quote(protocol)
            )
            try:
                info = yield self.get_json(uri, {})

                if not _is_valid_3pe_metadata(info):
                    logger.warning("query_3pe_protocol to %s did not return a"
                                   " valid result", uri)
                    defer.returnValue(None)

                for instance in info.get("instances", []):
                    network_id = instance.get("network_id", None)
                    if network_id is not None:
                        instance["instance_id"] = ThirdPartyInstanceID(
                            service.id, network_id,
                        ).to_string()

                defer.returnValue(info)
            except Exception as ex:
                logger.warning("query_3pe_protocol to %s threw exception %s",
                               uri, ex)
                defer.returnValue(None)

        key = (service.id, protocol)
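        # Get-or-set idiom: reuse any cached (possibly still in-flight)
        # Deferred for this (service, protocol) key, otherwise start _get()
        # and cache its Deferred for up to HOUR_IN_MS.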
        return self.protocol_meta_cache.get(key) or (
            self.protocol_meta_cache.set(key, _get())
        )

    @defer.inlineCallbacks
    def push_bulk(self, service, events, txn_id=None):
        if service.url is None:
            defer.returnValue(True)

        events = self._serialize(events)

        if txn_id is None:

synapse/appservice/scheduler.py
@@ -48,9 +48,12 @@ UP & quit +---------- YES SUCCESS
This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
from twisted.internet import defer

from synapse.appservice import ApplicationServiceState
from twisted.internet import defer
from synapse.util.logcontext import preserve_fn
from synapse.util.metrics import Measure

import logging

logger = logging.getLogger(__name__)
@@ -73,7 +76,7 @@ class ApplicationServiceScheduler(object):
        self.txn_ctrl = _TransactionController(
            self.clock, self.store, self.as_api, create_recoverer
        )
        self.queuer = _ServiceQueuer(self.txn_ctrl)
        self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)

    @defer.inlineCallbacks
    def start(self):
@@ -94,38 +97,36 @@ class _ServiceQueuer(object):
    this schedules any other events in the queue to run.
    """

    def __init__(self, txn_ctrl):
    def __init__(self, txn_ctrl, clock):
        self.queued_events = {}  # dict of {service_id: [events]}
        self.pending_requests = {}  # dict of {service_id: Deferred}
        self.requests_in_flight = set()
        self.txn_ctrl = txn_ctrl
        self.clock = clock

    def enqueue(self, service, event):
        # if this service isn't being sent something
        if not self.pending_requests.get(service.id):
            self._send_request(service, [event])
        else:
            # add to queue for this service
            if service.id not in self.queued_events:
                self.queued_events[service.id] = []
            self.queued_events[service.id].append(event)
        self.queued_events.setdefault(service.id, []).append(event)
        preserve_fn(self._send_request)(service)

    def _send_request(self, service, events):
        # send request and add callbacks
        d = self.txn_ctrl.send(service, events)
        d.addBoth(self._on_request_finish)
        d.addErrback(self._on_request_fail)
        self.pending_requests[service.id] = d
    @defer.inlineCallbacks
    def _send_request(self, service):
        if service.id in self.requests_in_flight:
            return

    def _on_request_finish(self, service):
        self.pending_requests[service.id] = None
        # if there are queued events, then send them.
        if (service.id in self.queued_events
                and len(self.queued_events[service.id]) > 0):
            self._send_request(service, self.queued_events[service.id])
            self.queued_events[service.id] = []
        self.requests_in_flight.add(service.id)
        try:
            while True:
                events = self.queued_events.pop(service.id, [])
                if not events:
                    return

    def _on_request_fail(self, err):
        logger.error("AS request failed: %s", err)
                with Measure(self.clock, "servicequeuer.send"):
                    try:
                        yield self.txn_ctrl.send(service, events)
                    except:
                        logger.exception("AS request failed")
        finally:
            self.requests_in_flight.discard(service.id)
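        # Only one send loop runs per service (guarded by requests_in_flight);
        # events queued while a loop is running are drained on its next
        # iteration.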


class _TransactionController(object):
@@ -149,14 +150,12 @@ class _TransactionController(object):
            if service_is_up:
                sent = yield txn.send(self.as_api)
                if sent:
                    txn.complete(self.store)
                    yield txn.complete(self.store)
                else:
                    self._start_recoverer(service)
                    preserve_fn(self._start_recoverer)(service)
        except Exception as e:
            logger.exception(e)
            self._start_recoverer(service)
        # request has finished
        defer.returnValue(service)
            preserve_fn(self._start_recoverer)(service)

    @defer.inlineCallbacks
    def on_recovered(self, recoverer):

synapse/config/_base.py
@@ -64,11 +64,12 @@ class Config(object):
        if isinstance(value, int) or isinstance(value, long):
            return value
        second = 1000
        hour = 60 * 60 * second
        minute = 60 * second
        hour = 60 * minute
        day = 24 * hour
        week = 7 * day
        year = 365 * day
        sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
        sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
        size = 1
        suffix = value[-1]
        if suffix in sizes:

synapse/config/appservice.py
@@ -28,6 +28,7 @@ class AppServiceConfig(Config):

    def read_config(self, config):
        self.app_service_config_files = config.get("app_service_config_files", [])
        self.notify_appservices = config.get("notify_appservices", True)

    def default_config(cls, **kwargs):
        return """\
@@ -85,7 +86,7 @@ def load_appservices(hostname, config_files):

def _load_appservice(hostname, as_info, config_filename):
    required_string_fields = [
        "id", "url", "as_token", "hs_token", "sender_localpart"
        "id", "as_token", "hs_token", "sender_localpart"
    ]
    for field in required_string_fields:
        if not isinstance(as_info.get(field), basestring):
@@ -93,6 +94,14 @@ def _load_appservice(hostname, as_info, config_filename):
                field, config_filename,
            ))

    # 'url' must either be a string or explicitly null, not missing
    # to avoid accidentally turning off push for ASes.
    if (not isinstance(as_info.get("url"), basestring) and
            as_info.get("url", "") is not None):
        raise KeyError(
            "Required string field or explicit null: 'url' (%s)" % (config_filename,)
        )

    localpart = as_info["sender_localpart"]
    if urllib.quote(localpart) != localpart:
        raise ValueError(
@@ -101,6 +110,11 @@ def _load_appservice(hostname, as_info, config_filename):
    user = UserID(localpart, hostname)
    user_id = user.to_string()

    # Rate limiting for users of this AS is on by default (excludes sender)
    rate_limited = True
    if isinstance(as_info.get("rate_limited"), bool):
        rate_limited = as_info.get("rate_limited")

    # namespace checks
    if not isinstance(as_info.get("namespaces"), dict):
        raise KeyError("Requires 'namespaces' object.")
@@ -122,6 +136,22 @@ def _load_appservice(hostname, as_info, config_filename):
            raise ValueError(
                "Missing/bad type 'exclusive' key in %s", regex_obj
            )
    # protocols check
    protocols = as_info.get("protocols")
    if protocols:
        # Reject a bare string here: it would iterate like a list of
        # characters and silently pass the loop below.
        if isinstance(protocols, str) or not isinstance(protocols, list):
            raise KeyError("Optional 'protocols' must be a list if present.")
        for p in protocols:
            if not isinstance(p, str):
                raise KeyError("Bad value for 'protocols' item")

    if as_info["url"] is None:
        logger.info(
            "(%s) Explicitly empty 'url' provided. This application service"
            " will not receive events or queries.",
            config_filename,
        )
    return ApplicationService(
        token=as_info["as_token"],
        url=as_info["url"],
@@ -129,4 +159,6 @@ def _load_appservice(hostname, as_info, config_filename):
        hs_token=as_info["hs_token"],
        sender=user_id,
        id=as_info["id"],
        protocols=protocols,
        rate_limited=rate_limited
    )

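# A minimal registration-file sketch for the new optional fields (YAML,
# values illustrative only):
#
#   id: irc_bridge
#   as_token: <token>
#   hs_token: <token>
#   sender_localpart: irc_bot
#   url: null            # explicit null: the AS receives no events or queries
#   rate_limited: false  # exempt this AS's users from rate limiting
#   protocols: [irc]     # third-party protocols this AS claims to bridge
#   namespaces:
#     users: []
#     aliases: []
#     rooms: []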
synapse/config/emailconfig.py
@@ -68,6 +68,9 @@ class EmailConfig(Config):
            self.email_notif_for_new_users = email_config.get(
                "notif_for_new_users", True
            )
            self.email_riot_base_url = email_config.get(
                "riot_base_url", None
            )
            if "app_name" in email_config:
                self.email_app_name = email_config["app_name"]
            else:
@@ -85,6 +88,9 @@ class EmailConfig(Config):
    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Enable sending emails for notification events
        # Defining a custom URL for Riot is only needed if email notifications
        # should contain links to a self-hosted installation of Riot; when set
        # the "app_name" setting is ignored.
        #email:
        #   enable_notifs: false
        #   smtp_host: "localhost"
@@ -95,4 +101,5 @@ class EmailConfig(Config):
        #   notif_template_html: notif_mail.html
        #   notif_template_text: notif_mail.txt
        #   notif_for_new_users: True
        #   riot_base_url: "http://localhost/riot"
        """

synapse/config/homeserver.py
@@ -30,7 +30,7 @@ from .saml2 import SAML2Config
from .cas import CasConfig
from .password import PasswordConfig
from .jwt import JWTConfig
from .ldap import LDAPConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .emailconfig import EmailConfig
from .workers import WorkerConfig

@@ -39,8 +39,8 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                       RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                       VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,
                       WorkerConfig,):
                       JWTConfig, PasswordConfig, EmailConfig,
                       WorkerConfig, PasswordAuthProviderConfig,):
    pass

synapse/config/ldap.py (deleted file)
@@ -1,100 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Niklas Riekenbrauck
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError


MISSING_LDAP3 = (
    "Missing ldap3 library. This is required for LDAP Authentication."
)


class LDAPMode(object):
    SIMPLE = "simple",
    SEARCH = "search",

    LIST = (SIMPLE, SEARCH)


class LDAPConfig(Config):
    def read_config(self, config):
        ldap_config = config.get("ldap_config", {})

        self.ldap_enabled = ldap_config.get("enabled", False)

        if self.ldap_enabled:
            # verify dependencies are available
            try:
                import ldap3
                ldap3  # to stop unused lint
            except ImportError:
                raise ConfigError(MISSING_LDAP3)

            self.ldap_mode = LDAPMode.SIMPLE

            # verify config sanity
            self.require_keys(ldap_config, [
                "uri",
                "base",
                "attributes",
            ])

            self.ldap_uri = ldap_config["uri"]
            self.ldap_start_tls = ldap_config.get("start_tls", False)
            self.ldap_base = ldap_config["base"]
            self.ldap_attributes = ldap_config["attributes"]

            if "bind_dn" in ldap_config:
                self.ldap_mode = LDAPMode.SEARCH
                self.require_keys(ldap_config, [
                    "bind_dn",
                    "bind_password",
                ])

                self.ldap_bind_dn = ldap_config["bind_dn"]
                self.ldap_bind_password = ldap_config["bind_password"]
                self.ldap_filter = ldap_config.get("filter", None)

            # verify attribute lookup
            self.require_keys(ldap_config['attributes'], [
                "uid",
                "name",
                "mail",
            ])

    def require_keys(self, config, required):
        missing = [key for key in required if key not in config]
        if missing:
            raise ConfigError(
                "LDAP enabled but missing required config values: {}".format(
                    ", ".join(missing)
                )
            )

    def default_config(self, **kwargs):
        return """\
        # ldap_config:
        #   enabled: true
        #   uri: "ldap://ldap.example.com:389"
        #   start_tls: true
        #   base: "ou=users,dc=example,dc=com"
        #   attributes:
        #      uid: "cn"
        #      mail: "email"
        #      name: "givenName"
        #   #bind_dn:
        #   #bind_password:
        #   #filter: "(objectClass=posixAccount)"
        """
@@ -15,14 +15,13 @@

from ._base import Config
from synapse.util.logcontext import LoggingContextFilter
from twisted.python.log import PythonLoggingObserver
from twisted.logger import globalLogBeginner, STDLibLogObserver
import logging
import logging.config
import yaml
from string import Template
import os
import signal
from synapse.util.debug import debug_deferreds


DEFAULT_LOG_CONFIG = Template("""
@@ -50,6 +49,7 @@ handlers:
    console:
        class: logging.StreamHandler
        formatter: precise
        filters: [context]

loggers:
    synapse:
@@ -70,8 +70,6 @@ class LoggingConfig(Config):
        self.verbosity = config.get("verbose", 0)
        self.log_config = self.abspath(config.get("log_config"))
        self.log_file = self.abspath(config.get("log_file"))
        if config.get("full_twisted_stacktraces"):
            debug_deferreds()

    def default_config(self, config_dir_path, server_name, **kwargs):
        log_file = self.abspath("homeserver.log")
@@ -87,11 +85,6 @@ class LoggingConfig(Config):

        # A yaml python logging config file
        log_config: "%(log_config)s"

        # Stop twisted from discarding the stack traces of exceptions in
        # deferreds by waiting a reactor tick before running a deferred's
        # callbacks.
        # full_twisted_stacktraces: true
        """ % locals()

    def read_arguments(self, args):
@@ -179,5 +172,15 @@ def setup_logging(log_config=None, log_file=None, verbosity=None):
        with open(log_config, 'r') as f:
            logging.config.dictConfig(yaml.load(f))

    observer = PythonLoggingObserver()
    observer.start()
    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time, as python.logging is a blocking API;
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a file.
    observer = STDLibLogObserver()
    globalLogBeginner.beginLoggingTo([observer])

synapse/config/password_auth_providers.py (new file, 72 lines)
@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Openmarket
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError

import importlib


class PasswordAuthProviderConfig(Config):
    def read_config(self, config):
        self.password_providers = []

        # We want to be backwards compatible with the old `ldap_config`
        # param.
        ldap_config = config.get("ldap_config", {})
        self.ldap_enabled = ldap_config.get("enabled", False)
        if self.ldap_enabled:
            from ldap_auth_provider import LdapAuthProvider
            parsed_config = LdapAuthProvider.parse_config(ldap_config)
            self.password_providers.append((LdapAuthProvider, parsed_config))

        providers = config.get("password_providers", [])
        for provider in providers:
            # This is for backwards compat when the ldap auth provider resided
            # in this package.
            if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider":
                from ldap_auth_provider import LdapAuthProvider
                provider_class = LdapAuthProvider
            else:
                # We need to import the module, and then pick the class out of
                # that, so we split based on the last dot.
                module, clz = provider['module'].rsplit(".", 1)
                module = importlib.import_module(module)
                provider_class = getattr(module, clz)

            try:
                provider_config = provider_class.parse_config(provider["config"])
            except Exception as e:
                raise ConfigError(
                    "Failed to parse config for %r: %r" % (provider['module'], e)
                )
            self.password_providers.append((provider_class, provider_config))

    def default_config(self, **kwargs):
        return """\
        # password_providers:
        #     - module: "ldap_auth_provider.LdapAuthProvider"
        #       config:
        #         enabled: true
        #         uri: "ldap://ldap.example.com:389"
        #         start_tls: true
        #         base: "ou=users,dc=example,dc=com"
        #         attributes:
        #            uid: "cn"
        #            mail: "email"
        #            name: "givenName"
        #         #bind_dn:
        #         #bind_password:
        #         #filter: "(objectClass=posixAccount)"
        """
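To make the dynamic loading above concrete, here is a minimal sketch of how a `module` path from the config resolves to a provider class. The `ExampleProvider` class and its `parse_config` hook are illustrative assumptions about the provider interface, not an API defined by this diff.

import importlib

def load_password_provider(module_path, provider_config):
    # Split "pkg.mod.Class" into a module path and a class name, mirroring
    # the rsplit(".", 1) logic in PasswordAuthProviderConfig.read_config.
    module_name, class_name = module_path.rsplit(".", 1)
    provider_class = getattr(importlib.import_module(module_name), class_name)
    # parse_config is expected to validate/normalise the provider's config
    # before the provider is instantiated later on.
    return provider_class, provider_class.parse_config(provider_config)

class ExampleProvider(object):
    """Hypothetical provider shape, for demonstration only."""
    @staticmethod
    def parse_config(config):
        return config

# e.g. load_password_provider("__main__.ExampleProvider", {"enabled": True})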
@@ -32,7 +32,6 @@ class RegistrationConfig(Config):
        )

        self.registration_shared_secret = config.get("registration_shared_secret")
        self.user_creation_max_duration = int(config["user_creation_max_duration"])

        self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
        self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
@@ -55,11 +54,6 @@ class RegistrationConfig(Config):
        # secret, even if registration is otherwise disabled.
        registration_shared_secret: "%(registration_shared_secret)s"

        # Sets the expiry for the short term user creation in
        # milliseconds. For instance the below duration is two weeks
        # in milliseconds.
        user_creation_max_duration: 1209600000

        # Set the number of bcrypt rounds used to generate password hash.
        # Larger numbers increase the work factor needed to generate the hash.
        # The default number of rounds is 12.

@@ -167,6 +167,8 @@ class ContentRepositoryConfig(Config):
        # - '10.0.0.0/8'
        # - '172.16.0.0/12'
        # - '192.168.0.0/16'
        # - '100.64.0.0/10'
        # - '169.254.0.0/16'
        #
        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.

@@ -29,7 +29,11 @@ class ServerConfig(Config):
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")
        self.secondary_directory_servers = config.get("secondary_directory_servers", [])

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != '/':
@@ -38,6 +42,15 @@ class ServerConfig(Config):

        self.listeners = config.get("listeners", [])

        for listener in self.listeners:
            bind_address = listener.pop("bind_address", None)
            bind_addresses = listener.setdefault("bind_addresses", [])

            if bind_address:
                bind_addresses.append(bind_address)
            elif not bind_addresses:
                bind_addresses.append('')

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

        bind_port = config.get("bind_port")
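The normalisation loop above can be read as a small standalone helper; a minimal sketch, assuming a plain dict-shaped listener entry (the helper name is illustrative):

def normalise_listener(listener):
    # Fold the legacy singular "bind_address" key into the newer
    # "bind_addresses" list, defaulting to '' (listen on all interfaces).
    bind_address = listener.pop("bind_address", None)
    bind_addresses = listener.setdefault("bind_addresses", [])
    if bind_address:
        bind_addresses.append(bind_address)
    elif not bind_addresses:
        bind_addresses.append('')
    return listener

# normalise_listener({"port": 8448, "bind_address": "127.0.0.1"})
# => {"port": 8448, "bind_addresses": ["127.0.0.1"]}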
@@ -50,7 +63,7 @@ class ServerConfig(Config):

            self.listeners.append({
                "port": bind_port,
                "bind_address": bind_host,
                "bind_addresses": [bind_host],
                "tls": True,
                "type": "http",
                "resources": [
@@ -69,7 +82,7 @@ class ServerConfig(Config):
        if unsecure_port:
            self.listeners.append({
                "port": unsecure_port,
                "bind_address": bind_host,
                "bind_addresses": [bind_host],
                "tls": False,
                "type": "http",
                "resources": [
@@ -88,7 +101,7 @@ class ServerConfig(Config):
        if manhole:
            self.listeners.append({
                "port": manhole,
                "bind_address": "127.0.0.1",
                "bind_addresses": ["127.0.0.1"],
                "type": "manhole",
            })

@@ -96,7 +109,7 @@ class ServerConfig(Config):
        if metrics_port:
            self.listeners.append({
                "port": metrics_port,
                "bind_address": config.get("metrics_bind_host", "127.0.0.1"),
                "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                "tls": False,
                "type": "http",
                "resources": [
@@ -142,14 +155,6 @@ class ServerConfig(Config):
        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        # gc_thresholds: [700, 10, 10]

        # A list of other Home Servers to fetch the public room directory from
        # and include in the public room directory of this home server
        # This is a temporary stopgap solution to populate a new server with a
        # list of rooms until there exists a good solution of a decentralized
        # room directory.
        # secondary_directory_servers:
        #     - matrix.org

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        listeners:
@@ -159,9 +164,14 @@ class ServerConfig(Config):
          # The port to listen for HTTPS requests on.
          port: %(bind_port)s

          # Local interface to listen on.
          # The empty string will cause synapse to listen on all interfaces.
          bind_address: ''
          # Local addresses to listen on.
          # This will listen on all IPv4 addresses by default.
          bind_addresses:
            - '0.0.0.0'
          # Uncomment to listen on all IPv6 interfaces
          # N.B: On at least Linux this will also listen on all IPv4
          # addresses, so you will need to comment out the line above.
          # - '::'

          # This is a 'http' listener, allows us to specify 'resources'.
          type: http
@@ -192,7 +202,7 @@ class ServerConfig(Config):
        # For when matrix traffic passes through a loadbalancer that unwraps TLS.
        - port: %(unsecure_port)s
          tls: false
          bind_address: ''
          bind_addresses: ['0.0.0.0']
          type: http

          x_forwarded: false

@@ -19,6 +19,9 @@ from OpenSSL import crypto
import subprocess
import os

from hashlib import sha256
from unpaddedbase64 import encode_base64

GENERATE_DH_PARAMS = False


@@ -42,6 +45,19 @@ class TlsConfig(Config):
            config.get("tls_dh_params_path"), "tls_dh_params"
        )

        self.tls_fingerprints = config["tls_fingerprints"]

        # Check that our own certificate is included in the list of fingerprints
        # and include it if it is not.
        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1,
            self.tls_certificate
        )
        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
        if sha256_fingerprint not in sha256_fingerprints:
            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})

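For reference, the fingerprint check above boils down to hashing the DER (ASN1) encoding of the certificate; a minimal sketch, assuming a PEM certificate on disk (the path and function name are illustrative):

from hashlib import sha256

from OpenSSL import crypto
from unpaddedbase64 import encode_base64

def cert_sha256_fingerprint(pem_path):
    # Load the certificate, dump its DER encoding, and base64 the SHA-256
    # digest, matching the {"sha256": ...} entries in tls_fingerprints.
    with open(pem_path) as f:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
    return encode_base64(sha256(der_bytes).digest())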
        # This config option applies to non-federation HTTP clients
        # (e.g. for talking to recaptcha, identity servers, and such)
        # It should never be used in production, and is intended for
@@ -73,6 +89,28 @@ class TlsConfig(Config):

        # Don't bind to the https port
        no_tls: False

        # List of allowed TLS fingerprints for this server to publish along
        # with the signing keys for this server. Other matrix servers that
        # make HTTPS requests to this server will check that the TLS
        # certificates returned by this server match one of the fingerprints.
        #
        # Synapse automatically adds the fingerprint of its own certificate
        # to the list. So if federation traffic is handled directly by synapse
        # then no modification to the list is required.
        #
        # If synapse is run behind a load balancer that handles the TLS then it
        # will be necessary to add the fingerprints of the certificates used by
        # the loadbalancers to this list if they are different to the one
        # synapse is using.
        #
        # Homeservers are permitted to cache the list of TLS fingerprints
        # returned in the key responses up to the "valid_until_ts" returned in
        # the key response. It may be necessary to publish the fingerprints of a new
        # certificate and wait until the "valid_until_ts" of the previous key
        # responses have passed before deploying it.
        tls_fingerprints: []
        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """ % locals()

    def read_tls_certificate(self, cert_path):

@@ -19,7 +19,9 @@ class VoipConfig(Config):

    def read_config(self, config):
        self.turn_uris = config.get("turn_uris", [])
        self.turn_shared_secret = config["turn_shared_secret"]
        self.turn_shared_secret = config.get("turn_shared_secret")
        self.turn_username = config.get("turn_username")
        self.turn_password = config.get("turn_password")
        self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])

    def default_config(self, **kwargs):
@@ -32,6 +34,11 @@ class VoipConfig(Config):
        # The shared secret used to compute passwords for the TURN server
        turn_shared_secret: "YOUR_SHARED_SECRET"

        # The username and password if the TURN server needs them and
        # does not use a token
        #turn_username: "TURNSERVER_USERNAME"
        #turn_password: "TURNSERVER_PASSWORD"

        # How long generated TURN credentials last
        turn_user_lifetime: "1h"
        """
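The "1h" lifetime is fed through the config base class's parse_duration helper, which yields milliseconds. A hedged re-implementation of that conversion, to show the intended semantics (the unit table here is an assumption, not taken from this diff):

def parse_duration(value):
    # Accept a bare integer (already milliseconds) or a string with a
    # single trailing unit, e.g. "1h" -> 3600000.
    if isinstance(value, int):
        return value
    second = 1000
    units = {
        "s": second,
        "m": 60 * second,
        "h": 60 * 60 * second,
        "d": 24 * 60 * 60 * second,
    }
    if value and value[-1] in units:
        return int(value[:-1]) * units[value[-1]]
    return int(value)

# parse_duration("1h") == 3600000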

@@ -29,3 +29,13 @@ class WorkerConfig(Config):
        self.worker_log_file = config.get("worker_log_file")
        self.worker_log_config = config.get("worker_log_config")
        self.worker_replication_url = config.get("worker_replication_url")

        if self.worker_listeners:
            for listener in self.worker_listeners:
                bind_address = listener.pop("bind_address", None)
                bind_addresses = listener.setdefault("bind_addresses", [])

                if bind_address:
                    bind_addresses.append(bind_address)
                elif not bind_addresses:
                    bind_addresses.append('')

@@ -77,10 +77,12 @@ class SynapseKeyClientProtocol(HTTPClient):
    def __init__(self):
        self.remote_key = defer.Deferred()
        self.host = None
        self._peer = None

    def connectionMade(self):
        self.host = self.transport.getHost()
        logger.debug("Connected to %s", self.host)
        self._peer = self.transport.getPeer()
        logger.debug("Connected to %s", self._peer)

        self.sendCommand(b"GET", self.path)
        if self.host:
            self.sendHeader(b"Host", self.host)
@@ -124,7 +126,10 @@ class SynapseKeyClientProtocol(HTTPClient):
            self.timer.cancel()

    def on_timeout(self):
        logger.debug("Timeout waiting for response from %s", self.host)
        logger.debug(
            "Timeout waiting for response from %s: %s",
            self.host, self._peer,
        )
        self.errback(IOError("Timeout waiting for response"))
        self.transport.abortConnection()

@@ -133,4 +138,5 @@ class SynapseKeyClientFactory(Factory):
    def protocol(self):
        protocol = SynapseKeyClientProtocol()
        protocol.path = self.path
        protocol.host = self.host
        return protocol

@@ -22,6 +22,7 @@ from synapse.util.logcontext import (
    preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
    preserve_fn
)
from synapse.util.metrics import Measure

from twisted.internet import defer

@@ -44,7 +45,25 @@ import logging
logger = logging.getLogger(__name__)


KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
VerifyKeyRequest = namedtuple("VerifyRequest", (
    "server_name", "key_ids", "json_object", "deferred"
))
"""
A request for a verify key to verify a JSON object.

Attributes:
    server_name(str): The name of the server to verify against.
    key_ids(set(str)): The set of key_ids that could be used to verify the
        JSON object
    json_object(dict): The JSON object to verify.
    deferred(twisted.internet.defer.Deferred):
        A deferred (server_name, key_id, verify_key) tuple that resolves when
        a verify key has been fetched
"""
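A toy illustration of the request shape documented above (all values are placeholders; the namedtuple is redeclared so the snippet stands alone):

from collections import namedtuple

from twisted.internet import defer

VerifyKeyRequest = namedtuple("VerifyRequest", (
    "server_name", "key_ids", "json_object", "deferred"
))

request = VerifyKeyRequest(
    server_name="example.com",
    key_ids={"ed25519:auto"},
    json_object={"signatures": {}},
    deferred=defer.Deferred(),
)
# The keyring later fires request.deferred with a
# (server_name, key_id, verify_key) tuple once a key is fetched.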


class KeyLookupError(ValueError):
    pass


class Keyring(object):
@@ -74,39 +93,32 @@ class Keyring(object):
            list of deferreds indicating success or failure to verify each
            json object's signature for the given server_name.
        """
        group_id_to_json = {}
        group_id_to_group = {}
        group_ids = []

        next_group_id = 0
        deferreds = {}
        verify_requests = []

        for server_name, json_object in server_and_json:
            logger.debug("Verifying for %s", server_name)
            group_id = next_group_id
            next_group_id += 1
            group_ids.append(group_id)

            key_ids = signature_ids(json_object, server_name)
            if not key_ids:
                deferreds[group_id] = defer.fail(SynapseError(
                deferred = defer.fail(SynapseError(
                    400,
                    "Not signed with a supported algorithm",
                    Codes.UNAUTHORIZED,
                ))
            else:
                deferreds[group_id] = defer.Deferred()
                deferred = defer.Deferred()

            group = KeyGroup(server_name, group_id, key_ids)
            verify_request = VerifyKeyRequest(
                server_name, key_ids, json_object, deferred
            )

            group_id_to_group[group_id] = group
            group_id_to_json[group_id] = json_object
            verify_requests.append(verify_request)

        @defer.inlineCallbacks
        def handle_key_deferred(group, deferred):
            server_name = group.server_name
        def handle_key_deferred(verify_request):
            server_name = verify_request.server_name
            try:
                _, _, key_id, verify_key = yield deferred
                _, key_id, verify_key = yield verify_request.deferred
            except IOError as e:
                logger.warn(
                    "Got IOError when downloading keys for %s: %s %s",
@@ -128,7 +140,7 @@ class Keyring(object):
                    Codes.UNAUTHORIZED,
                )

            json_object = group_id_to_json[group.group_id]
            json_object = verify_request.json_object

            try:
                verify_signed_json(json_object, server_name, verify_key)
@@ -157,36 +169,34 @@ class Keyring(object):

        # Actually start fetching keys.
        wait_on_deferred.addBoth(
            lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
            lambda _: self.get_server_verify_keys(verify_requests)
        )

        # When we've finished fetching all the keys for a given server_name,
        # resolve the deferred passed to `wait_for_previous_lookups` so that
        # any lookups waiting will proceed.
        server_to_gids = {}
        server_to_request_ids = {}

        def remove_deferreds(res, server_name, group_id):
            server_to_gids[server_name].discard(group_id)
            if not server_to_gids[server_name]:
        def remove_deferreds(res, server_name, verify_request):
            request_id = id(verify_request)
            server_to_request_ids[server_name].discard(request_id)
            if not server_to_request_ids[server_name]:
                d = server_to_deferred.pop(server_name, None)
                if d:
                    d.callback(None)
            return res

        for g_id, deferred in deferreds.items():
            server_name = group_id_to_group[g_id].server_name
            server_to_gids.setdefault(server_name, set()).add(g_id)
            deferred.addBoth(remove_deferreds, server_name, g_id)
        for verify_request in verify_requests:
            server_name = verify_request.server_name
            request_id = id(verify_request)
            server_to_request_ids.setdefault(server_name, set()).add(request_id)
            verify_request.deferred.addBoth(remove_deferreds, server_name, verify_request)

        # Pass those keys to handle_key_deferred so that the json object
        # signatures can be verified
        return [
            preserve_context_over_fn(
                handle_key_deferred,
                group_id_to_group[g_id],
                deferreds[g_id],
            )
            for g_id in group_ids
            preserve_context_over_fn(handle_key_deferred, verify_request)
            for verify_request in verify_requests
        ]

    @defer.inlineCallbacks
@@ -220,7 +230,7 @@ class Keyring(object):

            d.addBoth(rm, server_name)

    def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
    def get_server_verify_keys(self, verify_requests):
        """Takes a list of VerifyKeyRequests and tries to find at least one
        key for each.
        """
@@ -234,76 +244,79 @@ class Keyring(object):

        @defer.inlineCallbacks
        def do_iterations():
            merged_results = {}
            with Measure(self.clock, "get_server_verify_keys"):
                merged_results = {}

            missing_keys = {}
            for group in group_id_to_group.values():
                missing_keys.setdefault(group.server_name, set()).update(
                    group.key_ids
                )

            for fn in key_fetch_fns:
                results = yield fn(missing_keys.items())
                merged_results.update(results)

                # We now need to figure out which groups we have keys for
                # and which we don't
                missing_groups = {}
                for group in group_id_to_group.values():
                    for key_id in group.key_ids:
                        if key_id in merged_results[group.server_name]:
                            with PreserveLoggingContext():
                                group_id_to_deferred[group.group_id].callback((
                                    group.group_id,
                                    group.server_name,
                                    key_id,
                                    merged_results[group.server_name][key_id],
                                ))
                            break
                    else:
                        missing_groups.setdefault(
                            group.server_name, []
                        ).append(group)

                if not missing_groups:
                    break

                missing_keys = {
                    server_name: set(
                        key_id for group in groups for key_id in group.key_ids
                missing_keys = {}
                for verify_request in verify_requests:
                    missing_keys.setdefault(verify_request.server_name, set()).update(
                        verify_request.key_ids
                    )
                    for server_name, groups in missing_groups.items()
                }

                for group in missing_groups.values():
                    group_id_to_deferred[group.group_id].errback(SynapseError(
                        401,
                        "No key for %s with id %s" % (
                            group.server_name, group.key_ids,
                        ),
                        Codes.UNAUTHORIZED,
                    ))
                for fn in key_fetch_fns:
                    results = yield fn(missing_keys.items())
                    merged_results.update(results)

                    # We now need to figure out which verify requests we have keys
                    # for and which we don't
                    missing_keys = {}
                    requests_missing_keys = []
                    for verify_request in verify_requests:
                        server_name = verify_request.server_name
                        result_keys = merged_results[server_name]

                        if verify_request.deferred.called:
                            # We've already called this deferred, which probably
                            # means that we've already found a key for it.
                            continue

                        for key_id in verify_request.key_ids:
                            if key_id in result_keys:
                                with PreserveLoggingContext():
                                    verify_request.deferred.callback((
                                        server_name,
                                        key_id,
                                        result_keys[key_id],
                                    ))
                                break
                        else:
                            # The else block is only reached if the loop above
                            # doesn't break.
                            missing_keys.setdefault(server_name, set()).update(
                                verify_request.key_ids
                            )
                            requests_missing_keys.append(verify_request)

                    if not missing_keys:
                        break

                for verify_request in requests_missing_keys:
                    verify_request.deferred.errback(SynapseError(
                        401,
                        "No key for %s with id %s" % (
                            verify_request.server_name, verify_request.key_ids,
                        ),
                        Codes.UNAUTHORIZED,
                    ))

        def on_err(err):
            for deferred in group_id_to_deferred.values():
                if not deferred.called:
                    deferred.errback(err)
            for verify_request in verify_requests:
                if not verify_request.deferred.called:
                    verify_request.deferred.errback(err)

        do_iterations().addErrback(on_err)

        return group_id_to_deferred

    @defer.inlineCallbacks
    def get_keys_from_store(self, server_name_and_key_ids):
        res = yield defer.gatherResults(
        res = yield preserve_context_over_deferred(defer.gatherResults(
            [
                self.store.get_server_verify_keys(
                preserve_fn(self.store.get_server_verify_keys)(
                    server_name, key_ids
                ).addCallback(lambda ks, server: (server, ks), server_name)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        defer.returnValue(dict(res))

@@ -324,13 +337,13 @@ class Keyring(object):
            )
            defer.returnValue({})

        results = yield defer.gatherResults(
        results = yield preserve_context_over_deferred(defer.gatherResults(
            [
                get_key(p_name, p_keys)
                preserve_fn(get_key)(p_name, p_keys)
                for p_name, p_keys in self.perspective_servers.items()
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        union_of_keys = {}
        for result in results:
@@ -356,7 +369,7 @@ class Keyring(object):
                )
            except Exception as e:
                logger.info(
                    "Unable to getting key %r for %r directly: %s %s",
                    "Unable to get key %r for %r directly: %s %s",
                    key_ids, server_name,
                    type(e).__name__, str(e.message),
                )
@@ -370,13 +383,13 @@ class Keyring(object):

        defer.returnValue(keys)

        results = yield defer.gatherResults(
        results = yield preserve_context_over_deferred(defer.gatherResults(
            [
                get_key(server_name, key_ids)
                preserve_fn(get_key)(server_name, key_ids)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        merged = {}
        for result in results:
@@ -418,7 +431,7 @@ class Keyring(object):
        for response in responses:
            if (u"signatures" not in response
                    or perspective_name not in response[u"signatures"]):
                raise ValueError(
                raise KeyLookupError(
                    "Key response not signed by perspective server"
                    " %r" % (perspective_name,)
                )
@@ -441,21 +454,21 @@ class Keyring(object):
                    list(response[u"signatures"][perspective_name]),
                    list(perspective_keys)
                )
                raise ValueError(
                raise KeyLookupError(
                    "Response not signed with a known key for perspective"
                    " server %r" % (perspective_name,)
                )

            processed_response = yield self.process_v2_response(
                perspective_name, response
                perspective_name, response, only_from_server=False
            )

            for server_name, response_keys in processed_response.items():
                keys.setdefault(server_name, {}).update(response_keys)

        yield defer.gatherResults(
        yield preserve_context_over_deferred(defer.gatherResults(
            [
                self.store_keys(
                preserve_fn(self.store_keys)(
                    server_name=server_name,
                    from_server=perspective_name,
                    verify_keys=response_keys,
@@ -463,7 +476,7 @@ class Keyring(object):
                for server_name, response_keys in keys.items()
            ],
            consumeErrors=True
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        defer.returnValue(keys)

@@ -484,10 +497,10 @@ class Keyring(object):

        if (u"signatures" not in response
                or server_name not in response[u"signatures"]):
            raise ValueError("Key response not signed by remote server")
            raise KeyLookupError("Key response not signed by remote server")

        if "tls_fingerprints" not in response:
            raise ValueError("Key response missing TLS fingerprints")
            raise KeyLookupError("Key response missing TLS fingerprints")

        certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1, tls_certificate
@@ -501,7 +514,7 @@ class Keyring(object):
            response_sha256_fingerprints.add(fingerprint[u"sha256"])

        if sha256_fingerprint_b64 not in response_sha256_fingerprints:
            raise ValueError("TLS certificate not allowed by fingerprints")
            raise KeyLookupError("TLS certificate not allowed by fingerprints")

        response_keys = yield self.process_v2_response(
            from_server=server_name,
@@ -511,7 +524,7 @@ class Keyring(object):

        keys.update(response_keys)

        yield defer.gatherResults(
        yield preserve_context_over_deferred(defer.gatherResults(
            [
                preserve_fn(self.store_keys)(
                    server_name=key_server_name,
@@ -521,13 +534,13 @@ class Keyring(object):
                for key_server_name, verify_keys in keys.items()
            ],
            consumeErrors=True
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        defer.returnValue(keys)

    @defer.inlineCallbacks
    def process_v2_response(self, from_server, response_json,
                            requested_ids=[]):
                            requested_ids=[], only_from_server=True):
        time_now_ms = self.clock.time_msec()
        response_keys = {}
        verify_keys = {}
@@ -551,9 +564,16 @@ class Keyring(object):

        results = {}
        server_name = response_json["server_name"]
        if only_from_server:
            if server_name != from_server:
                raise KeyLookupError(
                    "Expected a response for server %r not %r" % (
                        from_server, server_name
                    )
                )
        for key_id in response_json["signatures"].get(server_name, {}):
            if key_id not in response_json["verify_keys"]:
                raise ValueError(
                raise KeyLookupError(
                    "Key response must include verification keys for all"
                    " signatures"
                )
@@ -580,7 +600,7 @@ class Keyring(object):
        response_keys.update(verify_keys)
        response_keys.update(old_verify_keys)

        yield defer.gatherResults(
        yield preserve_context_over_deferred(defer.gatherResults(
            [
                preserve_fn(self.store.store_server_keys_json)(
                    server_name=server_name,
@@ -593,7 +613,7 @@ class Keyring(object):
                for key_id in updated_key_ids
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        results[server_name] = response_keys

@@ -621,15 +641,15 @@ class Keyring(object):

        if ("signatures" not in response
                or server_name not in response["signatures"]):
            raise ValueError("Key response not signed by remote server")
            raise KeyLookupError("Key response not signed by remote server")

        if "tls_certificate" not in response:
            raise ValueError("Key response missing TLS certificate")
            raise KeyLookupError("Key response missing TLS certificate")

        tls_certificate_b64 = response["tls_certificate"]

        if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
            raise ValueError("TLS certificate doesn't match")
            raise KeyLookupError("TLS certificate doesn't match")

        # Cache the result in the datastore.

@@ -645,7 +665,7 @@ class Keyring(object):

        for key_id in response["signatures"][server_name]:
            if key_id not in response["verify_keys"]:
                raise ValueError(
                raise KeyLookupError(
                    "Key response must include verification keys for all"
                    " signatures"
                )
@@ -682,7 +702,7 @@ class Keyring(object):
            A deferred that completes when the keys are stored.
        """
        # TODO(markjh): Store whether the keys have expired.
        yield defer.gatherResults(
        yield preserve_context_over_deferred(defer.gatherResults(
            [
                preserve_fn(self.store.store_server_verify_key)(
                    server_name, server_name, key.time_added, key
@@ -690,4 +710,4 @@ class Keyring(object):
                for key_id, key in verify_keys.items()
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

synapse/event_auth.py (new file, 678 lines)
@@ -0,0 +1,678 @@
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException
from unpaddedbase64 import decode_base64

from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, SynapseError, EventSizeError
from synapse.types import UserID, get_domain_from_id

logger = logging.getLogger(__name__)


def check(event, auth_events, do_sig_check=True, do_size_check=True):
    """ Checks if this event is correctly authed.

    Args:
        event: the event being checked.
        auth_events (dict: event-key -> event): the existing room state.

    Returns:
        True if the auth checks pass.
    """
    if do_size_check:
        _check_size_limits(event)

    if not hasattr(event, "room_id"):
        raise AuthError(500, "Event has no room_id: %s" % event)

    if do_sig_check:
        sender_domain = get_domain_from_id(event.sender)
        event_id_domain = get_domain_from_id(event.event_id)

        is_invite_via_3pid = (
            event.type == EventTypes.Member
            and event.membership == Membership.INVITE
            and "third_party_invite" in event.content
        )

        # Check the sender's domain has signed the event
        if not event.signatures.get(sender_domain):
            # We allow invites via 3pid to have a sender from a different
            # HS, as the sender must match the sender of the original
            # 3pid invite. This is checked further down with the
            # other dedicated membership checks.
            if not is_invite_via_3pid:
                raise AuthError(403, "Event not signed by sender's server")

        # Check the event_id's domain has signed the event
        if not event.signatures.get(event_id_domain):
            raise AuthError(403, "Event not signed by sending server")

    if auth_events is None:
        # Oh, we don't know what the state of the room was, so we
        # are trusting that this is allowed (at least for now)
        logger.warn("Trusting event: %s", event.event_id)
        return True

    if event.type == EventTypes.Create:
        room_id_domain = get_domain_from_id(event.room_id)
        if room_id_domain != sender_domain:
            raise AuthError(
                403,
                "Creation event's room_id domain does not match sender's"
            )
        # FIXME
        return True

    creation_event = auth_events.get((EventTypes.Create, ""), None)

    if not creation_event:
        raise SynapseError(
            403,
            "Room %r does not exist" % (event.room_id,)
        )

    creating_domain = get_domain_from_id(event.room_id)
    originating_domain = get_domain_from_id(event.sender)
    if creating_domain != originating_domain:
        if not _can_federate(event, auth_events):
            raise AuthError(
                403,
                "This room has been marked as unfederatable."
            )

    # FIXME: Temp hack
    if event.type == EventTypes.Aliases:
        if not event.is_state():
            raise AuthError(
                403,
                "Alias event must be a state event",
            )
        if not event.state_key:
            raise AuthError(
                403,
                "Alias event must have non-empty state_key"
            )
        sender_domain = get_domain_from_id(event.sender)
        if event.state_key != sender_domain:
            raise AuthError(
                403,
                "Alias event's state_key does not match sender's domain"
            )
        return True

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(
            "Auth events: %s",
            [a.event_id for a in auth_events.values()]
        )

    if event.type == EventTypes.Member:
        allowed = _is_membership_change_allowed(
            event, auth_events
        )
        if allowed:
            logger.debug("Allowing! %s", event)
        else:
            logger.debug("Denying! %s", event)
        return allowed

    _check_event_sender_in_room(event, auth_events)

    # Special case to allow m.room.third_party_invite events wherever
    # a user is allowed to issue invites. Fixes
    # https://github.com/vector-im/vector-web/issues/1208 hopefully
    if event.type == EventTypes.ThirdPartyInvite:
        user_level = get_user_power_level(event.user_id, auth_events)
        invite_level = _get_named_level(auth_events, "invite", 0)

        if user_level < invite_level:
            raise AuthError(
                403, (
                    "You cannot issue a third party invite for %s." %
                    (event.content.display_name,)
                )
            )
        else:
            return True

    _can_send_event(event, auth_events)

    if event.type == EventTypes.PowerLevels:
        _check_power_levels(event, auth_events)

    if event.type == EventTypes.Redaction:
        check_redaction(event, auth_events)

    logger.debug("Allowing! %s", event)


def _check_size_limits(event):
    def too_big(field):
        raise EventSizeError("%s too large" % (field,))

    if len(event.user_id) > 255:
        too_big("user_id")
    if len(event.room_id) > 255:
        too_big("room_id")
    if event.is_state() and len(event.state_key) > 255:
        too_big("state_key")
    if len(event.type) > 255:
        too_big("type")
    if len(event.event_id) > 255:
        too_big("event_id")
    if len(encode_canonical_json(event.get_pdu_json())) > 65536:
        too_big("event")


def _can_federate(event, auth_events):
    creation_event = auth_events.get((EventTypes.Create, ""))

    return creation_event.content.get("m.federate", True) is True


def _is_membership_change_allowed(event, auth_events):
    membership = event.content["membership"]

    # Check if this is the room creator joining:
    if len(event.prev_events) == 1 and Membership.JOIN == membership:
        # Get room creation event:
        key = (EventTypes.Create, "", )
        create = auth_events.get(key)
        if create and event.prev_events[0][0] == create.event_id:
            if create.content["creator"] == event.state_key:
                return True

    target_user_id = event.state_key

    creating_domain = get_domain_from_id(event.room_id)
    target_domain = get_domain_from_id(target_user_id)
    if creating_domain != target_domain:
        if not _can_federate(event, auth_events):
            raise AuthError(
                403,
                "This room has been marked as unfederatable."
            )

    # get info about the caller
    key = (EventTypes.Member, event.user_id, )
    caller = auth_events.get(key)

    caller_in_room = caller and caller.membership == Membership.JOIN
    caller_invited = caller and caller.membership == Membership.INVITE

    # get info about the target
    key = (EventTypes.Member, target_user_id, )
    target = auth_events.get(key)

    target_in_room = target and target.membership == Membership.JOIN
    target_banned = target and target.membership == Membership.BAN

    key = (EventTypes.JoinRules, "", )
    join_rule_event = auth_events.get(key)
    if join_rule_event:
        join_rule = join_rule_event.content.get(
            "join_rule", JoinRules.INVITE
        )
    else:
        join_rule = JoinRules.INVITE

    user_level = get_user_power_level(event.user_id, auth_events)
    target_level = get_user_power_level(
        target_user_id, auth_events
    )

    # FIXME (erikj): What should we do here as the default?
    ban_level = _get_named_level(auth_events, "ban", 50)

    logger.debug(
        "_is_membership_change_allowed: %s",
        {
            "caller_in_room": caller_in_room,
            "caller_invited": caller_invited,
            "target_banned": target_banned,
            "target_in_room": target_in_room,
            "membership": membership,
            "join_rule": join_rule,
            "target_user_id": target_user_id,
            "event.user_id": event.user_id,
        }
    )

    if Membership.INVITE == membership and "third_party_invite" in event.content:
        if not _verify_third_party_invite(event, auth_events):
            raise AuthError(403, "You are not invited to this room.")
        if target_banned:
            raise AuthError(
                403, "%s is banned from the room" % (target_user_id,)
            )
        return True

    if Membership.JOIN != membership:
        if (caller_invited
                and Membership.LEAVE == membership
                and target_user_id == event.user_id):
            return True

        if not caller_in_room:  # caller isn't joined
            raise AuthError(
                403,
                "%s not in room %s." % (event.user_id, event.room_id,)
            )

    if Membership.INVITE == membership:
        # TODO (erikj): We should probably handle this more intelligently
        # PRIVATE join rules.

        # Invites are valid iff caller is in the room and target isn't.
        if target_banned:
            raise AuthError(
                403, "%s is banned from the room" % (target_user_id,)
            )
        elif target_in_room:  # the target is already in the room.
            raise AuthError(403, "%s is already in the room." %
                                 target_user_id)
        else:
            invite_level = _get_named_level(auth_events, "invite", 0)

            if user_level < invite_level:
                raise AuthError(
                    403, "You cannot invite user %s." % target_user_id
                )
    elif Membership.JOIN == membership:
        # Joins are valid iff caller == target and they were:
        # invited: They are accepting the invitation
        # joined: It's a NOOP
        if event.user_id != target_user_id:
            raise AuthError(403, "Cannot force another user to join.")
        elif target_banned:
            raise AuthError(403, "You are banned from this room")
        elif join_rule == JoinRules.PUBLIC:
            pass
        elif join_rule == JoinRules.INVITE:
            if not caller_in_room and not caller_invited:
                raise AuthError(403, "You are not invited to this room.")
        else:
            # TODO (erikj): may_join list
            # TODO (erikj): private rooms
            raise AuthError(403, "You are not allowed to join this room")
    elif Membership.LEAVE == membership:
        # TODO (erikj): Implement kicks.
        if target_banned and user_level < ban_level:
            raise AuthError(
                403, "You cannot unban user %s." % (target_user_id,)
            )
        elif target_user_id != event.user_id:
            kick_level = _get_named_level(auth_events, "kick", 50)

            if user_level < kick_level or user_level <= target_level:
                raise AuthError(
                    403, "You cannot kick user %s." % target_user_id
                )
    elif Membership.BAN == membership:
        if user_level < ban_level or user_level <= target_level:
            raise AuthError(403, "You don't have permission to ban")
    else:
        raise AuthError(500, "Unknown membership %s" % membership)

    return True


def _check_event_sender_in_room(event, auth_events):
    key = (EventTypes.Member, event.user_id, )
    member_event = auth_events.get(key)

    return _check_joined_room(
        member_event,
        event.user_id,
        event.room_id
    )


def _check_joined_room(member, user_id, room_id):
    if not member or member.membership != Membership.JOIN:
        raise AuthError(403, "User %s not in room %s (%s)" % (
            user_id, room_id, repr(member)
        ))


def get_send_level(etype, state_key, auth_events):
    key = (EventTypes.PowerLevels, "", )
    send_level_event = auth_events.get(key)
    send_level = None
    if send_level_event:
        send_level = send_level_event.content.get("events", {}).get(
            etype
        )
        if send_level is None:
            if state_key is not None:
                send_level = send_level_event.content.get(
                    "state_default", 50
                )
            else:
                send_level = send_level_event.content.get(
                    "events_default", 0
                )

    if send_level:
        send_level = int(send_level)
    else:
        send_level = 0

    return send_level


def _can_send_event(event, auth_events):
    send_level = get_send_level(
        event.type, event.get("state_key", None), auth_events
    )
    user_level = get_user_power_level(event.user_id, auth_events)

    if user_level < send_level:
        raise AuthError(
            403,
            "You don't have permission to post that to the room. " +
            "user_level (%d) < send_level (%d)" % (user_level, send_level)
        )

    # Check state_key
    if hasattr(event, "state_key"):
        if event.state_key.startswith("@"):
            if event.state_key != event.user_id:
                raise AuthError(
                    403,
                    "You are not allowed to set others state"
                )

    return True


def check_redaction(event, auth_events):
    """Check whether the event sender is allowed to redact the target event.

    Returns:
        True if the sender is allowed to redact the target event if the
        target event was created by them.
        False if the sender is allowed to redact the target event with no
        further checks.

    Raises:
        AuthError if the event sender is definitely not allowed to redact
        the target event.
    """
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
|
||||
redact_level = _get_named_level(auth_events, "redact", 50)
|
||||
|
||||
if user_level >= redact_level:
|
||||
return False
|
||||
|
||||
redacter_domain = get_domain_from_id(event.event_id)
|
||||
redactee_domain = get_domain_from_id(event.redacts)
|
||||
if redacter_domain == redactee_domain:
|
||||
return True
|
||||
|
||||
raise AuthError(
|
||||
403,
|
||||
"You don't have permission to redact events"
|
||||
)
|
||||
|
||||
|
||||
def _check_power_levels(event, auth_events):
|
||||
user_list = event.content.get("users", {})
|
||||
# Validate users
|
||||
for k, v in user_list.items():
|
||||
try:
|
||||
UserID.from_string(k)
|
||||
except:
|
||||
raise SynapseError(400, "Not a valid user_id: %s" % (k,))
|
||||
|
||||
try:
|
||||
int(v)
|
||||
except:
|
||||
raise SynapseError(400, "Not a valid power level: %s" % (v,))
|
||||
|
||||
key = (event.type, event.state_key, )
|
||||
current_state = auth_events.get(key)
|
||||
|
||||
if not current_state:
|
||||
return
|
||||
|
||||
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
|
||||
# Check other levels:
|
||||
levels_to_check = [
|
||||
("users_default", None),
|
||||
("events_default", None),
|
||||
("state_default", None),
|
||||
("ban", None),
|
||||
("redact", None),
|
||||
("kick", None),
|
||||
("invite", None),
|
||||
]
|
||||
|
||||
old_list = current_state.content.get("users")
|
||||
for user in set(old_list.keys() + user_list.keys()):
|
||||
levels_to_check.append(
|
||||
(user, "users")
|
||||
)
|
||||
|
||||
old_list = current_state.content.get("events")
|
||||
new_list = event.content.get("events")
|
||||
for ev_id in set(old_list.keys() + new_list.keys()):
|
||||
levels_to_check.append(
|
||||
(ev_id, "events")
|
||||
)
|
||||
|
||||
old_state = current_state.content
|
||||
new_state = event.content
|
||||
|
||||
for level_to_check, dir in levels_to_check:
|
||||
old_loc = old_state
|
||||
new_loc = new_state
|
||||
if dir:
|
||||
old_loc = old_loc.get(dir, {})
|
||||
new_loc = new_loc.get(dir, {})
|
||||
|
||||
if level_to_check in old_loc:
|
||||
old_level = int(old_loc[level_to_check])
|
||||
else:
|
||||
old_level = None
|
||||
|
||||
if level_to_check in new_loc:
|
||||
new_level = int(new_loc[level_to_check])
|
||||
else:
|
||||
new_level = None
|
||||
|
||||
if new_level is not None and old_level is not None:
|
||||
if new_level == old_level:
|
||||
continue
|
||||
|
||||
if dir == "users" and level_to_check != event.user_id:
|
||||
if old_level == user_level:
|
||||
raise AuthError(
|
||||
403,
|
||||
"You don't have permission to remove ops level equal "
|
||||
"to your own"
|
||||
)
|
||||
|
||||
if old_level > user_level or new_level > user_level:
|
||||
raise AuthError(
|
||||
403,
|
||||
"You don't have permission to add ops level greater "
|
||||
"than your own"
|
||||
)
|
||||
|
||||
|
||||
def _get_power_level_event(auth_events):
|
||||
key = (EventTypes.PowerLevels, "", )
|
||||
return auth_events.get(key)
|
||||
|
||||
|
||||
def get_user_power_level(user_id, auth_events):
|
||||
power_level_event = _get_power_level_event(auth_events)
|
||||
|
||||
if power_level_event:
|
||||
level = power_level_event.content.get("users", {}).get(user_id)
|
||||
if not level:
|
||||
level = power_level_event.content.get("users_default", 0)
|
||||
|
||||
if level is None:
|
||||
return 0
|
||||
else:
|
||||
return int(level)
|
||||
else:
|
||||
key = (EventTypes.Create, "", )
|
||||
create_event = auth_events.get(key)
|
||||
if (create_event is not None and
|
||||
create_event.content["creator"] == user_id):
|
||||
return 100
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
def _get_named_level(auth_events, name, default):
|
||||
power_level_event = _get_power_level_event(auth_events)
|
||||
|
||||
if not power_level_event:
|
||||
return default
|
||||
|
||||
level = power_level_event.content.get(name, None)
|
||||
if level is not None:
|
||||
return int(level)
|
||||
else:
|
||||
return default
|
||||
|
||||
|
||||
def _verify_third_party_invite(event, auth_events):
|
||||
"""
|
||||
Validates that the invite event is authorized by a previous third-party invite.
|
||||
|
||||
Checks that the public key, and keyserver, match those in the third party invite,
|
||||
and that the invite event has a signature issued using that public key.
|
||||
|
||||
Args:
|
||||
event: The m.room.member join event being validated.
|
||||
auth_events: All relevant previous context events which may be used
|
||||
for authorization decisions.
|
||||
|
||||
Return:
|
||||
True if the event fulfills the expectations of a previous third party
|
||||
invite event.
|
||||
"""
|
||||
if "third_party_invite" not in event.content:
|
||||
return False
|
||||
if "signed" not in event.content["third_party_invite"]:
|
||||
return False
|
||||
signed = event.content["third_party_invite"]["signed"]
|
||||
for key in {"mxid", "token"}:
|
||||
if key not in signed:
|
||||
return False
|
||||
|
||||
token = signed["token"]
|
||||
|
||||
invite_event = auth_events.get(
|
||||
(EventTypes.ThirdPartyInvite, token,)
|
||||
)
|
||||
if not invite_event:
|
||||
return False
|
||||
|
||||
if invite_event.sender != event.sender:
|
||||
return False
|
||||
|
||||
if event.user_id != invite_event.user_id:
|
||||
return False
|
||||
|
||||
if signed["mxid"] != event.state_key:
|
||||
return False
|
||||
if signed["token"] != token:
|
||||
return False
|
||||
|
||||
for public_key_object in get_public_keys(invite_event):
|
||||
public_key = public_key_object["public_key"]
|
||||
try:
|
||||
for server, signature_block in signed["signatures"].items():
|
||||
for key_name, encoded_signature in signature_block.items():
|
||||
if not key_name.startswith("ed25519:"):
|
||||
continue
|
||||
verify_key = decode_verify_key_bytes(
|
||||
key_name,
|
||||
decode_base64(public_key)
|
||||
)
|
||||
verify_signed_json(signed, server, verify_key)
|
||||
|
||||
# We got the public key from the invite, so we know that the
|
||||
# correct server signed the signed bundle.
|
||||
# The caller is responsible for checking that the signing
|
||||
# server has not revoked that public key.
|
||||
return True
|
||||
except (KeyError, SignatureVerifyException,):
|
||||
continue
|
||||
return False
|
||||
|
||||
|
||||
def get_public_keys(invite_event):
|
||||
public_keys = []
|
||||
if "public_key" in invite_event.content:
|
||||
o = {
|
||||
"public_key": invite_event.content["public_key"],
|
||||
}
|
||||
if "key_validity_url" in invite_event.content:
|
||||
o["key_validity_url"] = invite_event.content["key_validity_url"]
|
||||
public_keys.append(o)
|
||||
public_keys.extend(invite_event.content.get("public_keys", []))
|
||||
return public_keys


def auth_types_for_event(event):
    """Given an event, return a list of (EventType, StateKey) that may be
    needed to auth the event. The returned list may be a superset of what
    would actually be required depending on the full state of the room.

    Used to limit the number of events to fetch from the database to
    actually auth the event.
    """
    if event.type == EventTypes.Create:
        return []

    auth_types = []

    auth_types.append((EventTypes.PowerLevels, "", ))
    auth_types.append((EventTypes.Member, event.user_id, ))
    auth_types.append((EventTypes.Create, "", ))

    if event.type == EventTypes.Member:
        membership = event.content["membership"]
        if membership in [Membership.JOIN, Membership.INVITE]:
            auth_types.append((EventTypes.JoinRules, "", ))

        auth_types.append((EventTypes.Member, event.state_key, ))

        if membership == Membership.INVITE:
            if "third_party_invite" in event.content:
                key = (
                    EventTypes.ThirdPartyInvite,
                    event.content["third_party_invite"]["signed"]["token"]
                )
                auth_types.append(key)

    return auth_types
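To make the "superset" behaviour concrete, a plain join event sent by @alice:example.com would yield roughly this (illustrative) list, which callers then use as state-fetch keys:

# Illustrative output of auth_types_for_event for a join event:
# [
#     ("m.room.power_levels", ""),
#     ("m.room.member", "@alice:example.com"),   # the sender
#     ("m.room.create", ""),
#     ("m.room.join_rules", ""),
#     ("m.room.member", "@alice:example.com"),   # the state_key
# ]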
@@ -36,6 +36,15 @@ class _EventInternalMetadata(object):
    def is_invite_from_remote(self):
        return getattr(self, "invite_from_remote", False)

    def get_send_on_behalf_of(self):
        """Whether this server should send the event on behalf of another server.
        This is used by the federation "send_join" API to forward the initial join
        event for a server in the room.

        Returns a str with the name of the server this event is sent on behalf of.
        """
        return getattr(self, "send_on_behalf_of", None)


def _event_dict_property(key):
    def getter(self):
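The hunk is truncated at the getter, but the shape of this helper is a property factory over the event's backing dict; a minimal sketch of the idea (not necessarily the exact Synapse body) is:

def _event_dict_property(key):
    # Sketch: expose self._event_dict[key] as a read/write/delete attribute.
    def getter(self):
        return self._event_dict[key]

    def setter(self, v):
        self._event_dict[key] = v

    def delete(self):
        del self._event_dict[key]

    return property(getter, setter, delete)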
@@ -70,7 +79,6 @@ class EventBase(object):
    auth_events = _event_dict_property("auth_events")
    depth = _event_dict_property("depth")
    content = _event_dict_property("content")
    event_id = _event_dict_property("event_id")
    hashes = _event_dict_property("hashes")
    origin = _event_dict_property("origin")
    origin_server_ts = _event_dict_property("origin_server_ts")
@@ -79,8 +87,6 @@ class EventBase(object):
    redacts = _event_dict_property("redacts")
    room_id = _event_dict_property("room_id")
    sender = _event_dict_property("sender")
    state_key = _event_dict_property("state_key")
    type = _event_dict_property("type")
    user_id = _event_dict_property("sender")

    @property
@@ -99,7 +105,7 @@ class EventBase(object):

        return d

    def get(self, key, default):
    def get(self, key, default=None):
        return self._event_dict.get(key, default)

    def get_internal_metadata_dict(self):
@@ -153,6 +159,11 @@ class FrozenEvent(EventBase):
        else:
            frozen_dict = event_dict

        self.event_id = event_dict["event_id"]
        self.type = event_dict["type"]
        if "state_key" in event_dict:
            self.state_key = event_dict["state_key"]

        super(FrozenEvent, self).__init__(
            frozen_dict,
            signatures=signatures,

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from . import EventBase, FrozenEvent
from . import EventBase, FrozenEvent, _event_dict_property

from synapse.types import EventID

@@ -34,6 +34,10 @@ class EventBuilder(EventBase):
            internal_metadata_dict=internal_metadata_dict,
        )

    event_id = _event_dict_property("event_id")
    state_key = _event_dict_property("state_key")
    type = _event_dict_property("type")

    def build(self):
        return FrozenEvent.from_event(self)

@@ -15,9 +15,30 @@


class EventContext(object):
    __slots__ = [
        "current_state_ids",
        "prev_state_ids",
        "state_group",
        "rejected",
        "push_actions",
        "prev_group",
        "delta_ids",
        "prev_state_events",
    ]

    def __init__(self, current_state=None):
        self.current_state = current_state
    def __init__(self):
        # The current state including the current event
        self.current_state_ids = None
        # The current state excluding the current event
        self.prev_state_ids = None
        self.state_group = None

        self.rejected = False
        self.push_actions = []

        # A previously persisted state group and a delta between that
        # and this state.
        self.prev_group = None
        self.delta_ids = None

        self.prev_state_events = None
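Adding __slots__ here is not just documentation: slotted classes reject attributes outside the declared set and skip the per-instance __dict__, which matters for a class instantiated once per event. A standalone illustration:

class WithSlots(object):
    __slots__ = ["state_group"]

ctx = WithSlots()
ctx.state_group = 42       # fine
# ctx.state_grup = 42      # would raise AttributeError rather than
                           # silently creating a new attribute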

@@ -16,6 +16,17 @@
from synapse.api.constants import EventTypes
from . import EventBase

from frozendict import frozendict

import re

# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded
# by a match for 'stuff'.
# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as
# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
SPLIT_FIELD_REGEX = re.compile(r'(?<!\\)\.')
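The regex's behaviour on a few inputs, which follows directly from the pattern (including the documented double-backslash limitation):

SPLIT_FIELD_REGEX.split("content.body")        # ['content', 'body']
SPLIT_FIELD_REGEX.split(r"thing\.with\.dots")  # ['thing\\.with\\.dots'] (no split)
SPLIT_FIELD_REGEX.split(r"content.a\.b")       # ['content', 'a\\.b']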


def prune_event(event):
    """ Returns a pruned version of the given event, which removes all keys we
@@ -88,6 +99,8 @@ def prune_event(event):

    if "age_ts" in event.unsigned:
        allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
    if "replaces_state" in event.unsigned:
        allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]

    return type(event)(
        allowed_fields,
@@ -95,6 +108,83 @@ def prune_event(event):
    )


def _copy_field(src, dst, field):
    """Copy the field in 'src' to 'dst'.

    For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
    then dst={"foo":{"bar":5}}.

    Args:
        src(dict): The dict to read from.
        dst(dict): The dict to modify.
        field(list<str>): List of keys to drill down to in 'src'.
    """
    if len(field) == 0:  # this should be impossible
        return
    if len(field) == 1:  # common case e.g. 'origin_server_ts'
        if field[0] in src:
            dst[field[0]] = src[field[0]]
        return

    # Else is a nested field e.g. 'content.body'
    # Pop the last field as that's the key to move across and we need the
    # parent dict in order to access the data. Drill down to the right dict.
    key_to_move = field.pop(-1)
    sub_dict = src
    for sub_field in field:  # e.g. sub_field => "content"
        if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
            sub_dict = sub_dict[sub_field]
        else:
            return

    if key_to_move not in sub_dict:
        return

    # Insert the key into the output dictionary, creating nested objects
    # as required. We couldn't do this any earlier or else we'd need to delete
    # the empty objects if the key didn't exist.
    sub_out_dict = dst
    for sub_field in field:
        sub_out_dict = sub_out_dict.setdefault(sub_field, {})
    sub_out_dict[key_to_move] = sub_dict[key_to_move]


def only_fields(dictionary, fields):
    """Return a new dict with only the fields in 'dictionary' which are present
    in 'fields'.

    If there are no event fields specified then all fields are included.
    The entries may include '.' characters to indicate sub-fields.
    So ['content.body'] will include the 'body' field of the 'content' object.
    A literal '.' character in a field name may be escaped using a '\'.

    Args:
        dictionary(dict): The dictionary to read from.
        fields(list<str>): A list of fields to copy over. Only shallow refs are
            taken.
    Returns:
        dict: A new dictionary with only the given fields. If fields was empty,
            the same dictionary is returned.
    """
    if len(fields) == 0:
        return dictionary

    # for each field, convert it:
    # ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
    split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]

    # for each element of the output array of arrays:
    # remove escaping so we can use the right key names.
    split_fields[:] = [
        [f.replace(r'\.', r'.') for f in field_array] for field_array in split_fields
    ]

    output = {}
    for field_array in split_fields:
        _copy_field(dictionary, output, field_array)
    return output
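A usage sketch of the filter (the event dict below is made up):

event_dict = {"content": {"body": "hi", "msgtype": "m.text"}, "sender": "@a:x"}
only_fields(event_dict, ["content.body", "sender"])
# => {"content": {"body": "hi"}, "sender": "@a:x"}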


def format_event_raw(d):
    return d

@@ -135,7 +225,7 @@ def format_event_for_client_v2_without_room_id(d):

def serialize_event(e, time_now_ms, as_client_event=True,
                    event_format=format_event_for_client_v1,
                    token_id=None):
                    token_id=None, only_event_fields=None):
    # FIXME(erikj): To handle the case of presence events and the like
    if not isinstance(e, EventBase):
        return e
@@ -162,6 +252,12 @@ def serialize_event(e, time_now_ms, as_client_event=True,
        d["unsigned"]["transaction_id"] = txn_id

    if as_client_event:
        return event_format(d)
    else:
        return d
        d = event_format(d)

    if only_event_fields:
        if (not isinstance(only_event_fields, list) or
                not all(isinstance(f, basestring) for f in only_event_fields)):
            raise TypeError("only_event_fields must be a list of strings")
        d = only_fields(d, only_event_fields)

    return d

@@ -17,10 +17,9 @@
"""

from .replication import ReplicationLayer
from .transport.client import TransportLayerClient


def initialize_http_replication(homeserver):
    transport = TransportLayerClient(homeserver)
def initialize_http_replication(hs):
    transport = hs.get_federation_transport_client()

    return ReplicationLayer(homeserver, transport)
    return ReplicationLayer(hs, transport)

@@ -23,6 +23,7 @@ from synapse.crypto.event_signing import check_event_content_hash
from synapse.api.errors import SynapseError

from synapse.util import unwrapFirstError
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred

import logging

@@ -102,10 +103,10 @@ class FederationBase(object):
                warn, pdu
            )

        valid_pdus = yield defer.gatherResults(
        valid_pdus = yield preserve_context_over_deferred(defer.gatherResults(
            deferreds,
            consumeErrors=True
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        if include_none:
            defer.returnValue(valid_pdus)
@@ -129,7 +130,7 @@ class FederationBase(object):
            for pdu in pdus
        ]

        deferreds = self.keyring.verify_json_objects_for_server([
        deferreds = preserve_fn(self.keyring.verify_json_objects_for_server)([
            (p.origin, p.get_pdu_json())
            for p in redacted_pdus
        ])

@@ -18,16 +18,15 @@ from twisted.internet import defer

from .federation_base import FederationBase
from synapse.api.constants import Membership
from .units import Edu

from synapse.api.errors import (
    CodeMessageException, HttpResponseException, SynapseError,
)
from synapse.util import unwrapFirstError
from synapse.util.async import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logutils import log_function
from synapse.events import FrozenEvent
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
from synapse.events import FrozenEvent, builder
import synapse.metrics

from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
@@ -44,17 +43,38 @@ logger = logging.getLogger(__name__)
# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")

sent_edus_counter = metrics.register_counter("sent_edus")

sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


PDU_RETRY_TIME_MS = 1 * 60 * 1000


class FederationClient(FederationBase):
    def __init__(self, hs):
        super(FederationClient, self).__init__(hs)

        self.pdu_destination_tried = {}
        self._clock.looping_call(
            self._clear_tried_cache, 60 * 1000,
        )
        self.state = hs.get_state_handler()

    def _clear_tried_cache(self):
        """Clear pdu_destination_tried cache"""
        now = self._clock.time_msec()

        old_dict = self.pdu_destination_tried
        self.pdu_destination_tried = {}

        for event_id, destination_dict in old_dict.items():
            destination_dict = {
                dest: time
                for dest, time in destination_dict.items()
                if time + PDU_RETRY_TIME_MS > now
            }
            if destination_dict:
                self.pdu_destination_tried[event_id] = destination_dict

    def start_get_pdu_cache(self):
        self._get_pdu_cache = ExpiringCache(
            cache_name="get_pdu_cache",
@@ -66,55 +86,6 @@ class FederationClient(FederationBase):

        self._get_pdu_cache.start()

    @log_function
    def send_pdu(self, pdu, destinations):
        """Informs the replication layer about a new PDU generated within the
        home server that should be transmitted to others.

        TODO: Figure out when we should actually resolve the deferred.

        Args:
            pdu (Pdu): The new Pdu.

        Returns:
            Deferred: Completes when we have successfully processed the PDU
            and replicated it to any interested remote home servers.
        """
        order = self._order
        self._order += 1

        sent_pdus_destination_dist.inc_by(len(destinations))

        logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)

        # TODO, add errback, etc.
        self._transaction_queue.enqueue_pdu(pdu, destinations, order)

        logger.debug(
            "[%s] transaction_layer.enqueue_pdu... done",
            pdu.event_id
        )

    @log_function
    def send_edu(self, destination, edu_type, content):
        edu = Edu(
            origin=self.server_name,
            destination=destination,
            edu_type=edu_type,
            content=content,
        )

        sent_edus_counter.inc()

        # TODO, add errback, etc.
        self._transaction_queue.enqueue_edu(edu)
        return defer.succeed(None)

    @log_function
    def send_failure(self, failure, destination):
        self._transaction_queue.enqueue_failure(failure, destination)
        return defer.succeed(None)

    @log_function
    def make_query(self, destination, query_type, args,
                   retry_on_dns_fail=False):
@@ -139,7 +110,7 @@ class FederationClient(FederationBase):
        )

    @log_function
    def query_client_keys(self, destination, content):
    def query_client_keys(self, destination, content, timeout):
        """Query device keys for a device hosted on a remote server.

        Args:
@@ -151,10 +122,22 @@ class FederationClient(FederationBase):
            response
        """
        sent_queries_counter.inc("client_device_keys")
        return self.transport_layer.query_client_keys(destination, content)
        return self.transport_layer.query_client_keys(
            destination, content, timeout
        )

    @log_function
    def claim_client_keys(self, destination, content):
    def query_user_devices(self, destination, user_id, timeout=30000):
        """Query the device keys for a list of user ids hosted on a remote
        server.
        """
        sent_queries_counter.inc("user_devices")
        return self.transport_layer.query_user_devices(
            destination, user_id, timeout
        )

    @log_function
    def claim_client_keys(self, destination, content, timeout):
        """Claims one-time keys for a device hosted on a remote server.

        Args:
@@ -166,7 +149,9 @@ class FederationClient(FederationBase):
            response
        """
        sent_queries_counter.inc("client_one_time_keys")
        return self.transport_layer.claim_client_keys(destination, content)
        return self.transport_layer.claim_client_keys(
            destination, content, timeout
        )

    @defer.inlineCallbacks
    @log_function
@@ -201,10 +186,10 @@ class FederationClient(FederationBase):
        ]

        # FIXME: We should handle signature failures more gracefully.
        pdus[:] = yield defer.gatherResults(
        pdus[:] = yield preserve_context_over_deferred(defer.gatherResults(
            self._check_sigs_and_hashes(pdus),
            consumeErrors=True,
        ).addErrback(unwrapFirstError)
        )).addErrback(unwrapFirstError)

        defer.returnValue(pdus)

@@ -236,12 +221,19 @@ class FederationClient(FederationBase):
        # TODO: Rate limit the number of times we try and get the same event.

        if self._get_pdu_cache:
            e = self._get_pdu_cache.get(event_id)
            if e:
                defer.returnValue(e)
            ev = self._get_pdu_cache.get(event_id)
            if ev:
                defer.returnValue(ev)

        pdu = None
        pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

        signed_pdu = None
        for destination in destinations:
            now = self._clock.time_msec()
            last_attempt = pdu_attempts.get(destination, 0)
            if last_attempt + PDU_RETRY_TIME_MS > now:
                continue

            try:
                limiter = yield get_retry_limiter(
                    destination,
@@ -265,39 +257,33 @@ class FederationClient(FederationBase):
                pdu = pdu_list[0]

                # Check signatures are correct.
                pdu = yield self._check_sigs_and_hashes([pdu])[0]
                signed_pdu = yield self._check_sigs_and_hashes([pdu])[0]

                break

            except SynapseError:
                logger.info(
                    "Failed to get PDU %s from %s because %s",
                    event_id, destination, e,
                )
                continue
            except CodeMessageException as e:
                if 400 <= e.code < 500:
                    raise
                pdu_attempts[destination] = now

            except SynapseError as e:
                logger.info(
                    "Failed to get PDU %s from %s because %s",
                    event_id, destination, e,
                )
                continue
            except NotRetryingDestination as e:
                logger.info(e.message)
                continue
            except Exception as e:
                pdu_attempts[destination] = now

                logger.info(
                    "Failed to get PDU %s from %s because %s",
                    event_id, destination, e,
                )
                continue

        if self._get_pdu_cache is not None and pdu:
            self._get_pdu_cache[event_id] = pdu
        if self._get_pdu_cache is not None and signed_pdu:
            self._get_pdu_cache[event_id] = signed_pdu

        defer.returnValue(pdu)
        defer.returnValue(signed_pdu)

    @defer.inlineCallbacks
    @log_function
@@ -314,6 +300,42 @@ class FederationClient(FederationBase):
            Deferred: Results in a list of PDUs.
        """

        try:
            # First we try and ask for just the IDs, as that's far quicker if
            # we have most of the state and auth_chain already.
            # However, this may 404 if the other side has an old synapse.
            result = yield self.transport_layer.get_room_state_ids(
                destination, room_id, event_id=event_id,
            )

            state_event_ids = result["pdu_ids"]
            auth_event_ids = result.get("auth_chain_ids", [])

            fetched_events, failed_to_fetch = yield self.get_events(
                [destination], room_id, set(state_event_ids + auth_event_ids)
            )

            if failed_to_fetch:
                logger.warn("Failed to get %r", failed_to_fetch)

            event_map = {
                ev.event_id: ev for ev in fetched_events
            }

            pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
            auth_chain = [
                event_map[e_id] for e_id in auth_event_ids if e_id in event_map
            ]

            auth_chain.sort(key=lambda e: e.depth)

            defer.returnValue((pdus, auth_chain))
        except HttpResponseException as e:
            if e.code == 400 or e.code == 404:
                logger.info("Failed to use get_room_state_ids API, falling back")
            else:
                raise e

        result = yield self.transport_layer.get_room_state(
            destination, room_id, event_id=event_id,
        )
@@ -327,18 +349,95 @@ class FederationClient(FederationBase):
            for p in result.get("auth_chain", [])
        ]

        seen_events = yield self.store.get_events([
            ev.event_id for ev in itertools.chain(pdus, auth_chain)
        ])

        signed_pdus = yield self._check_sigs_and_hash_and_fetch(
            destination, pdus, outlier=True
            destination,
            [p for p in pdus if p.event_id not in seen_events],
            outlier=True
        )
        signed_pdus.extend(
            seen_events[p.event_id] for p in pdus if p.event_id in seen_events
        )

        signed_auth = yield self._check_sigs_and_hash_and_fetch(
            destination, auth_chain, outlier=True
            destination,
            [p for p in auth_chain if p.event_id not in seen_events],
            outlier=True
        )
        signed_auth.extend(
            seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
        )

        signed_auth.sort(key=lambda e: e.depth)

        defer.returnValue((signed_pdus, signed_auth))

    @defer.inlineCallbacks
    def get_events(self, destinations, room_id, event_ids, return_local=True):
        """Fetch events from some remote destinations, checking if we already
        have them.

        Args:
            destinations (list)
            room_id (str)
            event_ids (list)
            return_local (bool): Whether to include events we already have in
                the DB in the returned list of events

        Returns:
            Deferred: A deferred resolving to a 2-tuple where the first is a list of
            events and the second is a list of event ids that we failed to fetch.
        """
        if return_local:
            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
            signed_events = seen_events.values()
        else:
            seen_events = yield self.store.have_events(event_ids)
            signed_events = []

        failed_to_fetch = set()

        missing_events = set(event_ids)
        for k in seen_events:
            missing_events.discard(k)

        if not missing_events:
            defer.returnValue((signed_events, failed_to_fetch))

        def random_server_list():
            srvs = list(destinations)
            random.shuffle(srvs)
            return srvs

        batch_size = 20
        missing_events = list(missing_events)
        for i in xrange(0, len(missing_events), batch_size):
            batch = set(missing_events[i:i + batch_size])

            deferreds = [
                preserve_fn(self.get_pdu)(
                    destinations=random_server_list(),
                    event_id=e_id,
                )
                for e_id in batch
            ]

            res = yield preserve_context_over_deferred(
                defer.DeferredList(deferreds, consumeErrors=True)
            )
            for success, result in res:
                if success and result:
                    signed_events.append(result)
                    batch.discard(result.event_id)

            # We removed all events we successfully fetched from `batch`
            failed_to_fetch.update(batch)

        defer.returnValue((signed_events, failed_to_fetch))
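A caller-side sketch of this API, assuming it is invoked from another inlineCallbacks method; the names below are hypothetical:

@defer.inlineCallbacks
def fetch_missing(self, servers, room_id, event_ids):
    # Hypothetical caller: fetch what we can from remotes, log the rest.
    events, failed = yield self.client.get_events(
        servers, room_id, event_ids, return_local=False,
    )
    logger.info("fetched %d events, failed on %r", len(events), failed)
    defer.returnValue(events)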

    @defer.inlineCallbacks
    @log_function
    def get_event_auth(self, destination, room_id, event_id):
@@ -410,18 +509,25 @@ class FederationClient(FederationBase):
                if "prev_state" not in pdu_dict:
                    pdu_dict["prev_state"] = []

                ev = builder.EventBuilder(pdu_dict)

                defer.returnValue(
                    (destination, self.event_from_pdu_json(pdu_dict))
                    (destination, ev)
                )
                break
            except CodeMessageException:
                raise
            except CodeMessageException as e:
                if not 500 <= e.code < 600:
                    raise
                else:
                    logger.warn(
                        "Failed to make_%s via %s: %s",
                        membership, destination, e.message
                    )
            except Exception as e:
                logger.warn(
                    "Failed to make_%s via %s: %s",
                    membership, destination, e.message
                )
                raise

        raise RuntimeError("Failed to send to any server.")

@@ -493,8 +599,14 @@ class FederationClient(FederationBase):
                    "auth_chain": signed_auth,
                    "origin": destination,
                })
            except CodeMessageException:
                raise
            except CodeMessageException as e:
                if not 500 <= e.code < 600:
                    raise
                else:
                    logger.exception(
                        "Failed to send_join via %s: %s",
                        destination, e.message
                    )
            except Exception as e:
                logger.exception(
                    "Failed to send_join via %s: %s",
@@ -553,24 +665,17 @@ class FederationClient(FederationBase):

        raise RuntimeError("Failed to send to any server.")

    @defer.inlineCallbacks
    def get_public_rooms(self, destinations):
        results_by_server = {}
    def get_public_rooms(self, destination, limit=None, since_token=None,
                         search_filter=None, include_all_networks=False,
                         third_party_instance_id=None):
        if destination == self.server_name:
            return

        @defer.inlineCallbacks
        def _get_result(s):
            if s == self.server_name:
                defer.returnValue()

            try:
                result = yield self.transport_layer.get_public_rooms(s)
                results_by_server[s] = result
            except:
                logger.exception("Error getting room list from server %r", s)

        yield concurrently_execute(_get_result, destinations, 3)

        defer.returnValue(results_by_server)
        return self.transport_layer.get_public_rooms(
            destination, limit, since_token, search_filter,
            include_all_networks=include_all_networks,
            third_party_instance_id=third_party_instance_id,
        )

    @defer.inlineCallbacks
    def query_auth(self, destination, room_id, event_id, local_auth):
@@ -614,7 +719,7 @@ class FederationClient(FederationBase):

    @defer.inlineCallbacks
    def get_missing_events(self, destination, room_id, earliest_events_ids,
                           latest_events, limit, min_depth):
                           latest_events, limit, min_depth, timeout):
        """Tries to fetch events we are missing. This is called when we receive
        an event without having received all of its ancestors.

@@ -628,6 +733,7 @@ class FederationClient(FederationBase):
                have all previous events for.
            limit (int): Maximum number of events to return.
            min_depth (int): Minimum depth of events to return.
            timeout (int): Max time to wait in ms
        """
        try:
            content = yield self.transport_layer.get_missing_events(
@@ -637,6 +743,7 @@ class FederationClient(FederationBase):
                latest_events=[e.event_id for e in latest_events],
                limit=limit,
                min_depth=min_depth,
                timeout=timeout,
            )

            events = [
@@ -647,8 +754,6 @@ class FederationClient(FederationBase):
            signed_events = yield self._check_sigs_and_hash_and_fetch(
                destination, events, outlier=False
            )

            have_gotten_all_from_destination = True
        except HttpResponseException as e:
            if not e.code == 400:
                raise
@@ -656,69 +761,6 @@ class FederationClient(FederationBase):
            # We are probably hitting an old server that doesn't support
            # get_missing_events
            signed_events = []
            have_gotten_all_from_destination = False

        if len(signed_events) >= limit:
            defer.returnValue(signed_events)

        servers = yield self.store.get_joined_hosts_for_room(room_id)

        servers = set(servers)
        servers.discard(self.server_name)

        failed_to_fetch = set()

        while len(signed_events) < limit:
            # Are we missing any?

            seen_events = set(earliest_events_ids)
            seen_events.update(e.event_id for e in signed_events if e)

            missing_events = {}
            for e in itertools.chain(latest_events, signed_events):
                if e.depth > min_depth:
                    missing_events.update({
                        e_id: e.depth for e_id, _ in e.prev_events
                        if e_id not in seen_events
                        and e_id not in failed_to_fetch
                    })

            if not missing_events:
                break

            have_seen = yield self.store.have_events(missing_events)

            for k in have_seen:
                missing_events.pop(k, None)

            if not missing_events:
                break

            # Okay, we haven't gotten everything yet. Let's get them.
            ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])

            if have_gotten_all_from_destination:
                servers.discard(destination)

            def random_server_list():
                srvs = list(servers)
                random.shuffle(srvs)
                return srvs

            deferreds = [
                self.get_pdu(
                    destinations=random_server_list(),
                    event_id=e_id,
                )
                for e_id, depth in ordered_missing[:limit - len(signed_events)]
            ]

            res = yield defer.DeferredList(deferreds, consumeErrors=True)
            for (result, val), (e_id, _) in zip(res, ordered_missing):
                if result and val:
                    signed_events.append(val)
                else:
                    failed_to_fetch.add(e_id)

        defer.returnValue(signed_events)


@@ -21,10 +21,12 @@ from .units import Transaction, Edu

from synapse.util.async import Linearizer
from synapse.util.logutils import log_function
from synapse.util.caches.response_cache import ResponseCache
from synapse.events import FrozenEvent
from synapse.types import get_domain_from_id
import synapse.metrics

from synapse.api.errors import FederationError, SynapseError
from synapse.api.errors import AuthError, FederationError, SynapseError

from synapse.crypto.event_signing import compute_event_signature

@@ -48,8 +50,14 @@ class FederationServer(FederationBase):
    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self._room_pdu_linearizer = Linearizer()
        self._server_linearizer = Linearizer()
        self.auth = hs.get_auth()

        self._room_pdu_linearizer = Linearizer("fed_room_pdu")
        self._server_linearizer = Linearizer("fed_server")

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)

    def set_handler(self, handler):
        """Sets the handler that the replication layer will use to communicate
@@ -125,7 +133,7 @@ class FederationServer(FederationBase):

        if response:
            logger.debug(
                "[%s] We've already responed to this request",
                "[%s] We've already responded to this request",
                transaction.transaction_id
            )
            defer.returnValue(response)
@@ -136,6 +144,26 @@ class FederationServer(FederationBase):
        results = []

        for pdu in pdu_list:
            # check that it's actually being sent from a valid destination to
            # workaround bug #1753 in 0.18.5 and 0.18.6
            if transaction.origin != get_domain_from_id(pdu.event_id):
                if not (
                    pdu.type == 'm.room.member' and
                    pdu.content and
                    pdu.content.get("membership", None) == 'join' and
                    self.hs.is_mine_id(pdu.state_key)
                ):
                    logger.info(
                        "Discarding PDU %s from invalid origin %s",
                        pdu.event_id, transaction.origin
                    )
                    continue
                else:
                    logger.info(
                        "Accepting join PDU %s from %s",
                        pdu.event_id, transaction.origin
                    )

            try:
                yield self._handle_new_pdu(transaction.origin, pdu)
                results.append({})
@@ -181,40 +209,76 @@ class FederationServer(FederationBase):
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception as e:
                logger.exception("Failed to handle edu %r", edu_type, e)
                logger.exception("Failed to handle edu %r", edu_type)
        else:
            logger.warn("Received EDU of type %s with no handler", edu_type)

    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        with (yield self._server_linearizer.queue((origin, room_id))):
            if event_id:
                pdus = yield self.handler.get_state_for_pdu(
                    origin, room_id, event_id,
                )
                auth_chain = yield self.store.get_auth_chain(
                    [pdu.event_id for pdu in pdus]
                )
        if not event_id:
            raise NotImplementedError("Specify an event")

                for event in auth_chain:
                    # We sign these again because there was a bug where we
                    # incorrectly signed things the first time round
                    if self.hs.is_mine_id(event.event_id):
                        event.signatures.update(
                            compute_event_signature(
                                event,
                                self.hs.hostname,
                                self.hs.config.signing_key[0]
                            )
                        )
            else:
                raise NotImplementedError("Specify an event")
        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        result = self._state_resp_cache.get((room_id, event_id))
        if not result:
            with (yield self._server_linearizer.queue((origin, room_id))):
                resp = yield self._state_resp_cache.set(
                    (room_id, event_id),
                    self._on_context_state_request_compute(room_id, event_id)
                )
        else:
            resp = yield result

        defer.returnValue((200, resp))

    @defer.inlineCallbacks
    def on_state_ids_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        state_ids = yield self.handler.get_state_ids_for_pdu(
            room_id, event_id,
        )
        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)

        defer.returnValue((200, {
            "pdu_ids": state_ids,
            "auth_chain_ids": auth_chain_ids,
        }))

    @defer.inlineCallbacks
    def _on_context_state_request_compute(self, room_id, event_id):
        pdus = yield self.handler.get_state_for_pdu(
            room_id, event_id,
        )
        auth_chain = yield self.store.get_auth_chain(
            [pdu.event_id for pdu in pdus]
        )

        for event in auth_chain:
            # We sign these again because there was a bug where we
            # incorrectly signed things the first time round
            if self.hs.is_mine_id(event.event_id):
                event.signatures.update(
                    compute_event_signature(
                        event,
                        self.hs.hostname,
                        self.hs.config.signing_key[0]
                    )
                )

        defer.returnValue({
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        }))
        })
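The get/set dance on _state_resp_cache above is a response-deduplication pattern: identical concurrent requests share one computation. Reduced to its essentials, assuming a cache that stores in-flight deferreds the way ResponseCache is used here:

from twisted.internet import defer

@defer.inlineCallbacks
def deduped(cache, key, compute):
    # Sketch only: reuse an in-flight/recent computation for `key`.
    result = cache.get(key)
    if result is None:
        result = cache.set(key, compute())
    value = yield result          # all callers wait on the same deferred
    defer.returnValue(value)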

    @defer.inlineCallbacks
    @log_function
@@ -348,27 +412,12 @@ class FederationServer(FederationBase):
            (200, send_content)
        )

    @defer.inlineCallbacks
    @log_function
    def on_query_client_keys(self, origin, content):
        query = []
        for user_id, device_ids in content.get("device_keys", {}).items():
            if not device_ids:
                query.append((user_id, None))
            else:
                for device_id in device_ids:
                    query.append((user_id, device_id))
        return self.on_query_request("client_keys", content)

        results = yield self.store.get_e2e_device_keys(query)

        json_result = {}
        for user_id, device_keys in results.items():
            for device_id, json_bytes in device_keys.items():
                json_result.setdefault(user_id, {})[device_id] = json.loads(
                    json_bytes
                )

        defer.returnValue({"device_keys": json_result})
    def on_query_user_devices(self, origin, user_id):
        return self.on_query_request("user_devices", user_id)

    @defer.inlineCallbacks
    @log_function
@@ -400,6 +449,7 @@ class FederationServer(FederationBase):
            " limit: %d, min_depth: %d",
            earliest_events, latest_events, limit, min_depth
        )

        missing_events = yield self.handler.on_get_missing_events(
            origin, room_id, earliest_events, latest_events, limit, min_depth
        )
@@ -449,6 +499,7 @@ class FederationServer(FederationBase):
    @defer.inlineCallbacks
    @log_function
    def _handle_new_pdu(self, origin, pdu, get_missing=True):

        # We reprocess pdus when we have seen them only as outliers
        existing = yield self._get_persisted_pdu(
            origin, pdu.event_id, do_auth=False
@@ -513,7 +564,16 @@ class FederationServer(FederationBase):
        if get_missing and prevs - seen:
            # If we're missing stuff, ensure we only fetch stuff one
            # at a time.
            logger.info(
                "Acquiring lock for room %r to fetch %d missing events: %r...",
                pdu.room_id, len(prevs - seen), list(prevs - seen)[:5],
            )
            with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
                logger.info(
                    "Acquired lock for room %r to fetch %d missing events",
                    pdu.room_id, len(prevs - seen),
                )

                # We recalculate seen, since it may have changed.
                have_seen = yield self.store.have_events(prevs)
                seen = set(have_seen.keys())
@@ -533,6 +593,25 @@ class FederationServer(FederationBase):
                        len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
                    )

                    # XXX: we set timeout to 10s to help workaround
                    # https://github.com/matrix-org/synapse/issues/1733.
                    # The reason is to avoid holding the linearizer lock
                    # whilst processing inbound /send transactions, causing
                    # FDs to stack up and block other inbound transactions
                    # which empirically can currently take up to 30 minutes.
                    #
                    # N.B. this explicitly disables retry attempts.
                    #
                    # N.B. this also increases our chances of falling back to
                    # fetching fresh state for the room if the missing event
                    # can't be found, which slightly reduces our security.
                    # it may also increase our DAG extremity count for the room,
                    # causing additional state resolution? See #1760.
                    # However, fetching state doesn't hold the linearizer lock
                    # apparently.
                    #
                    # see https://github.com/matrix-org/synapse/pull/1744

                    missing_events = yield self.get_missing_events(
                        origin,
                        pdu.room_id,
@@ -540,6 +619,7 @@ class FederationServer(FederationBase):
                        latest_events=[pdu],
                        limit=10,
                        min_depth=min_depth,
                        timeout=10000,
                    )

        # We want to sort these by depth so we process them and
@@ -578,7 +658,7 @@ class FederationServer(FederationBase):
                origin, pdu.room_id, pdu.event_id,
            )
        except:
            logger.warn("Failed to get state for event: %s", pdu.event_id)
            logger.exception("Failed to get state for event: %s", pdu.event_id)

        yield self.handler.on_receive_pdu(
            origin,

@@ -20,8 +20,6 @@ a given transport.
from .federation_client import FederationClient
from .federation_server import FederationServer

from .transaction_queue import TransactionQueue

from .persistence import TransactionActions

import logging
@@ -66,9 +64,6 @@ class ReplicationLayer(FederationClient, FederationServer):
        self._clock = hs.get_clock()

        self.transaction_actions = TransactionActions(self.store)
        self._transaction_queue = TransactionQueue(hs, transport_layer)

        self._order = 0

        self.hs = hs

synapse/federation/send_queue.py (new file, 298 lines)
@@ -0,0 +1,298 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A federation sender that forwards things to be sent across replication to
a worker process.

It assumes there is a single worker process feeding off of it.

Each row in the replication stream consists of a type and some json, where the
types indicate whether they are presence, or edus, etc.

Ephemeral or non-event data are queued up in-memory. When the worker requests
updates since a particular point, all in-memory data since before that point is
dropped. We also expire things in the queue after 5 minutes, to ensure that a
dead worker doesn't cause the queues to grow limitlessly.

Events are replicated via a separate events stream.
"""

from .units import Edu

from synapse.util.metrics import Measure
import synapse.metrics

from blist import sorteddict
import ujson


metrics = synapse.metrics.get_metrics_for(__name__)


PRESENCE_TYPE = "p"
KEYED_EDU_TYPE = "k"
EDU_TYPE = "e"
FAILURE_TYPE = "f"
DEVICE_MESSAGE_TYPE = "d"


class FederationRemoteSendQueue(object):
    """A drop in replacement for TransactionQueue"""

    def __init__(self, hs):
        self.server_name = hs.hostname
        self.clock = hs.get_clock()

        self.presence_map = {}
        self.presence_changed = sorteddict()

        self.keyed_edu = {}
        self.keyed_edu_changed = sorteddict()

        self.edus = sorteddict()

        self.failures = sorteddict()

        self.device_messages = sorteddict()

        self.pos = 1
        self.pos_time = sorteddict()

        # EVERYTHING IS SAD. In particular, python only makes new scopes when
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
        def register(name, queue):
            metrics.register_callback(
                name + "_size",
                lambda: len(queue),
            )

        for queue_name in [
            "presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
            "edus", "failures", "device_messages", "pos_time",
        ]:
            register(queue_name, getattr(self, queue_name))

        self.clock.looping_call(self._clear_queue, 30 * 1000)
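The "EVERYTHING IS SAD" comment is the classic Python late-binding pitfall, shown here in isolation:

queues = ([1], [1, 2], [1, 2, 3])
cbs = [lambda: len(q) for q in queues]
print([cb() for cb in cbs])   # [3, 3, 3]: every lambda sees the final q

# Introducing a function creates a fresh scope per call, fixing the binding:
def make_cb(q):
    return lambda: len(q)

cbs = [make_cb(q) for q in queues]
print([cb() for cb in cbs])   # [1, 2, 3] as intended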

    def _next_pos(self):
        pos = self.pos
        self.pos += 1
        self.pos_time[self.clock.time_msec()] = pos
        return pos

    def _clear_queue(self):
        """Clear the queues for anything older than N minutes"""

        FIVE_MINUTES_AGO = 5 * 60 * 1000
        now = self.clock.time_msec()

        keys = self.pos_time.keys()
        time = keys.bisect_left(now - FIVE_MINUTES_AGO)
        if not keys[:time]:
            return

        position_to_delete = max(keys[:time])
        for key in keys[:time]:
            del self.pos_time[key]

        self._clear_queue_before_pos(position_to_delete)

    def _clear_queue_before_pos(self, position_to_delete):
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
            keys = self.presence_changed.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_changed[key]

            user_ids = set(
                user_id for uids in self.presence_changed.values() for _, user_id in uids
            )

            to_del = [
                user_id for user_id in self.presence_map if user_id not in user_ids
            ]
            for user_id in to_del:
                del self.presence_map[user_id]

            # Delete things out of keyed edus
            keys = self.keyed_edu_changed.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.keyed_edu_changed[key]

            live_keys = set()
            for edu_key in self.keyed_edu_changed.values():
                live_keys.add(edu_key)

            to_del = [edu_key for edu_key in self.keyed_edu if edu_key not in live_keys]
            for edu_key in to_del:
                del self.keyed_edu[edu_key]

            # Delete things out of edu map
            keys = self.edus.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.edus[key]

            # Delete things out of failure map
            keys = self.failures.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.failures[key]

            # Delete things out of device map
            keys = self.device_messages.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.device_messages[key]
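The expiry above leans on blist.sorteddict keeping its keys ordered, so a single bisect finds everything older than the cutoff; the same move in miniature:

from blist import sorteddict

pos_time = sorteddict()           # time_msec -> stream position
pos_time.update({1000: 1, 2000: 2, 9000: 3})

cutoff = 5000
keys = pos_time.keys()
i = keys.bisect_left(cutoff)      # first index with key >= cutoff
for k in list(keys[:i]):          # everything strictly older
    del pos_time[k]
# pos_time now only contains {9000: 3}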

    def notify_new_events(self, current_id):
        """As per TransactionQueue"""
        # We don't need to replicate this as it gets sent down a different
        # stream.
        pass

    def send_edu(self, destination, edu_type, content, key=None):
        """As per TransactionQueue"""
        pos = self._next_pos()

        edu = Edu(
            origin=self.server_name,
            destination=destination,
            edu_type=edu_type,
            content=content,
        )

        if key:
            assert isinstance(key, tuple)
            self.keyed_edu[(destination, key)] = edu
            self.keyed_edu_changed[pos] = (destination, key)
        else:
            self.edus[pos] = edu

    def send_presence(self, destination, states):
        """As per TransactionQueue"""
        pos = self._next_pos()

        self.presence_map.update({
            state.user_id: state
            for state in states
        })

        self.presence_changed[pos] = [
            (destination, state.user_id) for state in states
        ]

    def send_failure(self, failure, destination):
        """As per TransactionQueue"""
        pos = self._next_pos()

        self.failures[pos] = (destination, str(failure))

    def send_device_messages(self, destination):
        """As per TransactionQueue"""
        pos = self._next_pos()
        self.device_messages[pos] = destination

    def get_current_token(self):
        return self.pos - 1

    def get_replication_rows(self, token, limit, federation_ack=None):
        """
        Args:
            token (int)
            limit (int)
            federation_ack (int): Optional. The position up to which the worker
                has explicitly acknowledged that it has handled the stream.
                Allows us to drop data from before that point.
        """
        # TODO: Handle limit.

        # To handle restarts where we wrap around
        if token > self.pos:
            token = -1

        rows = []

        # There should be only one reader, so let's delete everything it's
        # acknowledged it's seen.
        if federation_ack:
            self._clear_queue_before_pos(federation_ack)

        # Fetch changed presence
        keys = self.presence_changed.keys()
        i = keys.bisect_right(token)
        dest_user_ids = set(
            (pos, dest_user_id)
            for pos in keys[i:]
            for dest_user_id in self.presence_changed[pos]
        )

        for (key, (dest, user_id)) in dest_user_ids:
            rows.append((key, PRESENCE_TYPE, ujson.dumps({
                "destination": dest,
                "state": self.presence_map[user_id].as_dict(),
            })))

        # Fetch changed keyed edus
        keys = self.keyed_edu_changed.keys()
        i = keys.bisect_right(token)
        keyed_edus = set((k, self.keyed_edu_changed[k]) for k in keys[i:])

        for (pos, (destination, edu_key)) in keyed_edus:
            rows.append(
                (pos, KEYED_EDU_TYPE, ujson.dumps({
                    "key": edu_key,
                    "edu": self.keyed_edu[(destination, edu_key)].get_internal_dict(),
                }))
            )

        # Fetch changed edus
        keys = self.edus.keys()
        i = keys.bisect_right(token)
        edus = set((k, self.edus[k]) for k in keys[i:])

        for (pos, edu) in edus:
            rows.append((pos, EDU_TYPE, ujson.dumps(edu.get_internal_dict())))

        # Fetch changed failures
        keys = self.failures.keys()
        i = keys.bisect_right(token)
        failures = set((k, self.failures[k]) for k in keys[i:])

        for (pos, (destination, failure)) in failures:
            rows.append((pos, FAILURE_TYPE, ujson.dumps({
                "destination": destination,
                "failure": failure,
            })))

        # Fetch changed device messages
        keys = self.device_messages.keys()
        i = keys.bisect_right(token)
        device_messages = set((k, self.device_messages[k]) for k in keys[i:])

        for (pos, destination) in device_messages:
            rows.append((pos, DEVICE_MESSAGE_TYPE, ujson.dumps({
                "destination": destination,
            })))

        # Sort rows based on pos
        rows.sort()

        return rows
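Each row is a (position, type, json) triple; a hypothetical worker-side consumer of this stream might look like the following (handle_presence and handle_edu are placeholders, not Synapse APIs):

import ujson

for pos, typ, data in queue.get_replication_rows(last_token, limit=100):
    payload = ujson.loads(data)
    if typ == PRESENCE_TYPE:
        handle_presence(payload["destination"], payload["state"])
    elif typ in (EDU_TYPE, KEYED_EDU_TYPE):
        handle_edu(payload)
    last_token = pos              # advance our ack point as we go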
@@ -17,15 +17,17 @@
from twisted.internet import defer

from .persistence import TransactionActions
from .units import Transaction
from .units import Transaction, Edu

from synapse.api.errors import HttpResponseException
from synapse.util.async import run_on_reactor
from synapse.util.logutils import log_function
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logcontext import preserve_context_over_fn
from synapse.util.retryutils import (
    get_retry_limiter, NotRetryingDestination,
)
from synapse.util.metrics import measure_func
from synapse.types import get_domain_from_id
from synapse.handlers.presence import format_user_presence_state
import synapse.metrics

import logging
@@ -35,6 +37,12 @@ logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

client_metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
sent_pdus_destination_dist = client_metrics.register_distribution(
    "sent_pdu_destinations"
)
sent_edus_counter = client_metrics.register_counter("sent_edus")


class TransactionQueue(object):
    """This class makes sure we only have one transaction in flight at
@@ -43,15 +51,17 @@ class TransactionQueue(object):
    It batches pending PDUs into single transactions.
    """

    def __init__(self, hs, transport_layer):
    def __init__(self, hs):
        self.server_name = hs.hostname

        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.transaction_actions = TransactionActions(self.store)

        self.transport_layer = transport_layer
        self.transport_layer = hs.get_federation_transport_client()

        self._clock = hs.get_clock()
        self.clock = hs.get_clock()
        self.is_mine_id = hs.is_mine_id

        # Is a mapping from destinations -> deferreds. Used to keep track
        # of which destinations have transactions in flight and when they are
@@ -69,20 +79,36 @@ class TransactionQueue(object):
        # destination -> list of tuple(edu, deferred)
        self.pending_edus_by_dest = edus = {}

        # Presence needs to be separate as we send single aggregate EDUs
        self.pending_presence_by_dest = presence = {}
        self.pending_edus_keyed_by_dest = edus_keyed = {}

        metrics.register_callback(
            "pending_pdus",
            lambda: sum(map(len, pdus.values())),
        )
        metrics.register_callback(
            "pending_edus",
            lambda: sum(map(len, edus.values())),
            lambda: (
                sum(map(len, edus.values()))
                + sum(map(len, presence.values()))
                + sum(map(len, edus_keyed.values()))
            ),
        )

        # destination -> list of tuple(failure, deferred)
        self.pending_failures_by_dest = {}

        self.last_device_stream_id_by_dest = {}
        self.last_device_list_stream_id_by_dest = {}

        # HACK to get unique tx id
        self._next_txn_id = int(self._clock.time_msec())
        self._next_txn_id = int(self.clock.time_msec())

        self._order = 1

        self._is_processing = False
        self._last_poked_id = -1

    def can_send_to(self, destination):
        """Can we send messages to the given server?
@@ -104,11 +130,76 @@ class TransactionQueue(object):
        else:
            return not destination.startswith("localhost")

    def enqueue_pdu(self, pdu, destinations, order):
    @defer.inlineCallbacks
    def notify_new_events(self, current_id):
        """This gets called when we have some new events we might want to
        send out to other servers.
        """
        self._last_poked_id = max(current_id, self._last_poked_id)

        if self._is_processing:
            return

        try:
            self._is_processing = True
            while True:
                last_token = yield self.store.get_federation_out_pos("events")
                next_token, events = yield self.store.get_all_new_events_stream(
                    last_token, self._last_poked_id, limit=20,
                )

                logger.debug("Handling %s -> %s", last_token, next_token)

                if not events and next_token >= self._last_poked_id:
                    break

                for event in events:
                    # Only send events for this server.
                    send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                    is_mine = self.is_mine_id(event.event_id)
                    if not is_mine and send_on_behalf_of is None:
                        continue

                    # Get the state from before the event.
                    # We need to make sure that this is the state from before
                    # the event and not from after it.
                    # Otherwise if the last member on a server in a room is
                    # banned then it won't receive the event because it won't
                    # be in the room after the ban.
                    users_in_room = yield self.state.get_current_user_in_room(
                        event.room_id, latest_event_ids=[
                            prev_id for prev_id, _ in event.prev_events
                        ],
                    )

                    destinations = set(
                        get_domain_from_id(user_id) for user_id in users_in_room
                    )
                    if send_on_behalf_of is not None:
                        # If we are sending the event on behalf of another server
                        # then it already has the event and there is no reason to
                        # send the event to it.
                        destinations.discard(send_on_behalf_of)

                    logger.debug("Sending %s to %r", event, destinations)

                    self._send_pdu(event, destinations)

                yield self.store.update_federation_out_pos(
                    "events", next_token
                )

        finally:
            self._is_processing = False
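The _is_processing flag plus _last_poked_id is a re-entrancy guard: concurrent callers only bump the high-water mark while a single loop drains the stream. The pattern in isolation (names hypothetical):

from twisted.internet import defer

@defer.inlineCallbacks
def poke(self, new_id):
    self.last_poked = max(new_id, self.last_poked)
    if self.processing:
        return                    # the running loop will see last_poked
    try:
        self.processing = True
        while self.position < self.last_poked:
            self.position = yield self.process_batch(self.position)
    finally:
        self.processing = False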
|
||||
|
||||
def _send_pdu(self, pdu, destinations):
|
||||
# We loop through all destinations to see whether we already have
|
||||
# a transaction in progress. If we do, stick it in the pending_pdus
|
||||
# table and we'll get back to it later.
|
||||
|
||||
order = self._order
|
||||
self._order += 1
|
||||
|
||||
destinations = set(destinations)
|
||||
destinations = set(
|
||||
dest for dest in destinations if self.can_send_to(dest)
|
||||
@@ -119,89 +210,84 @@ class TransactionQueue(object):
        if not destinations:
            return

        deferreds = []
        sent_pdus_destination_dist.inc_by(len(destinations))

        for destination in destinations:
            deferred = defer.Deferred()
            self.pending_pdus_by_dest.setdefault(destination, []).append(
                (pdu, deferred, order)
                (pdu, order)
            )

            def chain(failure):
                if not deferred.called:
                    deferred.errback(failure)
            preserve_context_over_fn(
                self._attempt_new_transaction, destination
            )

            def log_failure(f):
                logger.warn("Failed to send pdu to %s: %s", destination, f.value)
    def send_presence(self, destination, states):
        if not self.can_send_to(destination):
            return

            deferred.addErrback(log_failure)
        self.pending_presence_by_dest.setdefault(destination, {}).update({
            state.user_id: state for state in states
        })

            with PreserveLoggingContext():
                self._attempt_new_transaction(destination).addErrback(chain)
        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )

            deferreds.append(deferred)

    # NO inlineCallbacks
    def enqueue_edu(self, edu):
        destination = edu.destination
    def send_edu(self, destination, edu_type, content, key=None):
        edu = Edu(
            origin=self.server_name,
            destination=destination,
            edu_type=edu_type,
            content=content,
        )

        if not self.can_send_to(destination):
            return

        deferred = defer.Deferred()
        self.pending_edus_by_dest.setdefault(destination, []).append(
            (edu, deferred)
        sent_edus_counter.inc()

        if key:
            self.pending_edus_keyed_by_dest.setdefault(
                destination, {}
            )[(edu.edu_type, key)] = edu
        else:
            self.pending_edus_by_dest.setdefault(destination, []).append(edu)

        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )

        def chain(failure):
            if not deferred.called:
                deferred.errback(failure)

        def log_failure(f):
            logger.warn("Failed to send edu to %s: %s", destination, f.value)

        deferred.addErrback(log_failure)

        with PreserveLoggingContext():
            self._attempt_new_transaction(destination).addErrback(chain)

        return deferred

    @defer.inlineCallbacks
    def enqueue_failure(self, failure, destination):
    def send_failure(self, failure, destination):
        if destination == self.server_name or destination == "localhost":
            return

        deferred = defer.Deferred()

        if not self.can_send_to(destination):
            return

        self.pending_failures_by_dest.setdefault(
            destination, []
        ).append(
            (failure, deferred)
        ).append(failure)

        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )

        def chain(f):
            if not deferred.called:
                deferred.errback(f)
    def send_device_messages(self, destination):
        if destination == self.server_name or destination == "localhost":
            return

        def log_failure(f):
            logger.warn("Failed to send failure to %s: %s", destination, f.value)
        if not self.can_send_to(destination):
            return

        deferred.addErrback(log_failure)
        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )

        with PreserveLoggingContext():
            self._attempt_new_transaction(destination).addErrback(chain)

        yield deferred
    def get_current_token(self):
        return 0

    @defer.inlineCallbacks
    @log_function
    def _attempt_new_transaction(self, destination):
        yield run_on_reactor()

        # list of (pending_pdu, deferred, order)
        if destination in self.pending_transactions:
            # XXX: pending_transactions can get stuck on by a never-ending
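send_edu's new key argument lets a later EDU of the same type overwrite an earlier one that is still queued for a destination, so ephemeral data such as typing or presence updates never piles up. A small sketch of that dedup behaviour (destination name illustrative), dict-based as in pending_edus_keyed_by_dest above:

# Sketch of keyed-EDU deduplication: for a given (edu_type, key) pair only
# the most recent EDU per destination survives until the next transaction.
pending_edus_keyed_by_dest = {}

def queue_keyed_edu(destination, edu_type, key, content):
    pending_edus_keyed_by_dest.setdefault(
        destination, {}
    )[(edu_type, key)] = content

queue_keyed_edu("remote.example", "m.typing", "!room:a", {"typing": True})
queue_keyed_edu("remote.example", "m.typing", "!room:a", {"typing": False})

# Only the latest update is left to send:
assert list(pending_edus_keyed_by_dest["remote.example"].values()) == [
    {"typing": False}
]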
@@ -214,55 +300,154 @@ class TransactionQueue(object):
            )
            return

        pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
        pending_edus = self.pending_edus_by_dest.pop(destination, [])
        pending_failures = self.pending_failures_by_dest.pop(destination, [])

        if pending_pdus:
            logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                         destination, len(pending_pdus))

        if not pending_pdus and not pending_edus and not pending_failures:
            logger.debug("TX [%s] Nothing to send", destination)
            return

        try:
            self.pending_transactions[destination] = 1

            yield run_on_reactor()

            while True:
                pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
                pending_edus = self.pending_edus_by_dest.pop(destination, [])
                pending_presence = self.pending_presence_by_dest.pop(destination, {})
                pending_failures = self.pending_failures_by_dest.pop(destination, [])

                pending_edus.extend(
                    self.pending_edus_keyed_by_dest.pop(destination, {}).values()
                )

                limiter = yield get_retry_limiter(
                    destination,
                    self.clock,
                    self.store,
                    backoff_on_404=True,  # If we get a 404 the other side has gone
                )

                device_message_edus, device_stream_id, dev_list_id = (
                    yield self._get_new_device_messages(destination)
                )

                pending_edus.extend(device_message_edus)
                if pending_presence:
                    pending_edus.append(
                        Edu(
                            origin=self.server_name,
                            destination=destination,
                            edu_type="m.presence",
                            content={
                                "push": [
                                    format_user_presence_state(
                                        presence, self.clock.time_msec()
                                    )
                                    for presence in pending_presence.values()
                                ]
                            },
                        )
                    )

                if pending_pdus:
                    logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                                 destination, len(pending_pdus))

                if not pending_pdus and not pending_edus and not pending_failures:
                    logger.debug("TX [%s] Nothing to send", destination)
                    self.last_device_stream_id_by_dest[destination] = (
                        device_stream_id
                    )
                    return

                success = yield self._send_new_transaction(
                    destination, pending_pdus, pending_edus, pending_failures,
                    limiter=limiter,
                )
                if success:
                    # Remove the acknowledged device messages from the database
                    # Only bother if we actually sent some device messages
                    if device_message_edus:
                        yield self.store.delete_device_msgs_for_remote(
                            destination, device_stream_id
                        )
                        logger.info("Marking as sent %r %r", destination, dev_list_id)
                        yield self.store.mark_as_sent_devices_by_remote(
                            destination, dev_list_id
                        )

                    self.last_device_stream_id_by_dest[destination] = device_stream_id
                    self.last_device_list_stream_id_by_dest[destination] = dev_list_id
                else:
                    break
        except NotRetryingDestination:
            logger.debug(
                "TX [%s] not ready for retry yet - "
                "dropping transaction for now",
                destination,
            )
        finally:
            # We want to be *very* sure we delete this after we stop processing
            self.pending_transactions.pop(destination, None)

    @defer.inlineCallbacks
    def _get_new_device_messages(self, destination):
        last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
        to_device_stream_id = self.store.get_to_device_stream_token()
        contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
            destination, last_device_stream_id, to_device_stream_id
        )
        edus = [
            Edu(
                origin=self.server_name,
                destination=destination,
                edu_type="m.direct_to_device",
                content=content,
            )
            for content in contents
        ]

        last_device_list = self.last_device_list_stream_id_by_dest.get(destination, 0)
        now_stream_id, results = yield self.store.get_devices_by_remote(
            destination, last_device_list
        )
        edus.extend(
            Edu(
                origin=self.server_name,
                destination=destination,
                edu_type="m.device_list_update",
                content=content,
            )
            for content in results
        )
        defer.returnValue((edus, stream_id, now_stream_id))

    @measure_func("_send_new_transaction")
    @defer.inlineCallbacks
    def _send_new_transaction(self, destination, pending_pdus, pending_edus,
                              pending_failures, limiter):

        # Sort based on the order field
        pending_pdus.sort(key=lambda t: t[1])
        pdus = [x[0] for x in pending_pdus]
        edus = pending_edus
        failures = [x.get_dict() for x in pending_failures]

        success = True

        try:
            logger.debug("TX [%s] _attempt_new_transaction", destination)

            # Sort based on the order field
            pending_pdus.sort(key=lambda t: t[2])

            pdus = [x[0] for x in pending_pdus]
            edus = [x[0] for x in pending_edus]
            failures = [x[0].get_dict() for x in pending_failures]
            deferreds = [
                x[1]
                for x in pending_pdus + pending_edus + pending_failures
            ]

            txn_id = str(self._next_txn_id)

            limiter = yield get_retry_limiter(
                destination,
                self._clock,
                self.store,
            )

            logger.debug(
                "TX [%s] {%s} Attempting new transaction"
                " (pdus: %d, edus: %d, failures: %d)",
                destination, txn_id,
                len(pending_pdus),
                len(pending_edus),
                len(pending_failures)
                len(pdus),
                len(edus),
                len(failures)
            )

            logger.debug("TX [%s] Persisting transaction...", destination)

            transaction = Transaction.create_new(
                origin_server_ts=int(self._clock.time_msec()),
                origin_server_ts=int(self.clock.time_msec()),
                transaction_id=txn_id,
                origin=self.server_name,
                destination=destination,
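Device messages are delivered at-least-once here: they are read from the store up to a stream id, and only after the remote accepts the transaction does delete_device_msgs_for_remote prune them and the per-destination high-water mark advance. A toy illustration of that acknowledge-then-prune flow, with a plain list standing in for the database and an assumed-successful send:

# Toy version of the stream-id acknowledgement used for device messages:
# nothing is deleted until the send succeeds, so a failed transaction
# simply retries from the old stream position.
queue = [(1, "msg-a"), (2, "msg-b"), (3, "msg-c")]  # (stream_id, payload)
last_acked = {}  # destination -> highest acknowledged stream_id

def get_new_msgs(destination, up_to):
    since = last_acked.get(destination, 0)
    batch = [(sid, m) for sid, m in queue if since < sid <= up_to]
    new_pos = batch[-1][0] if batch else since
    return batch, new_pos

def ack(destination, stream_id):
    global queue
    queue = [(sid, m) for sid, m in queue if sid > stream_id]
    last_acked[destination] = stream_id

batch, stream_id = get_new_msgs("remote.example", up_to=3)
send_succeeded = True  # pretend the HTTP send returned 200
if send_succeeded:
    ack("remote.example", stream_id)
assert queue == [] and last_acked["remote.example"] == 3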
@@ -281,9 +466,9 @@ class TransactionQueue(object):
                " (PDUs: %d, EDUs: %d, failures: %d)",
                destination, txn_id,
                transaction.transaction_id,
                len(pending_pdus),
                len(pending_edus),
                len(pending_failures),
                len(pdus),
                len(edus),
                len(failures),
            )

            with limiter:
@@ -293,7 +478,7 @@ class TransactionQueue(object):
            # keys work
            def json_data_cb():
                data = transaction.get_dict()
                now = int(self._clock.time_msec())
                now = int(self.clock.time_msec())
                if "pdus" in data:
                    for p in data["pdus"]:
                        if "age_ts" in p:
@@ -319,6 +504,13 @@ class TransactionQueue(object):
                code = e.code
                response = e.response

                if e.code in (401, 404, 429) or 500 <= e.code:
                    logger.info(
                        "TX [%s] {%s} got %d response",
                        destination, txn_id, code
                    )
                    raise e

            logger.info(
                "TX [%s] {%s} got %d response",
                destination, txn_id, code
@@ -333,28 +525,12 @@ class TransactionQueue(object):

            logger.debug("TX [%s] Marked as delivered", destination)

            logger.debug("TX [%s] Yielding to callbacks...", destination)

            for deferred in deferreds:
                if code == 200:
                    deferred.callback(None)
                else:
                    deferred.errback(RuntimeError("Got status %d" % code))

                # Ensures we don't continue until all callbacks on that
                # deferred have fired
                try:
                    yield deferred
                except:
                    pass

            logger.debug("TX [%s] Yielded to callbacks", destination)
        except NotRetryingDestination:
            logger.info(
                "TX [%s] not ready for retry yet - "
                "dropping transaction for now",
                destination,
            )
            if code != 200:
                for p in pdus:
                    logger.info(
                        "Failed to send event %s to %s", p.event_id, destination
                    )
                success = False
        except RuntimeError as e:
            # We capture this here as there is nothing that actually listens
            # for this function's deferred to finish.
@@ -363,6 +539,11 @@ class TransactionQueue(object):
                destination,
                e,
            )

            success = False

            for p in pdus:
                logger.info("Failed to send event %s to %s", p.event_id, destination)
        except Exception as e:
            # We capture this here as there is nothing that actually listens
            # for this function's deferred to finish.
@@ -372,13 +553,9 @@ class TransactionQueue(object):
                e,
            )

            for deferred in deferreds:
                if not deferred.called:
                    deferred.errback(e)
            success = False

        finally:
            # We want to be *very* sure we delete this after we stop processing
            self.pending_transactions.pop(destination, None)
            for p in pdus:
                logger.info("Failed to send event %s to %s", p.event_id, destination)

        # Check to see if there is anything else to send.
        self._attempt_new_transaction(destination)
        defer.returnValue(success)

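The new error handling distinguishes response codes: 401, 404, 429 and anything >= 500 re-raise so the retry limiter backs the destination off, while other 4xx responses count as a delivered (if rejected) transaction whose PDUs are merely logged as failed. A small helper expressing that policy, as the hunk above implies:

def should_back_off(code):
    """Return True if a federation /send response code should trigger the
    retry limiter (mirrors `e.code in (401, 404, 429) or 500 <= e.code`)."""
    return code in (401, 404, 429) or 500 <= code

assert should_back_off(404) and should_back_off(502)
# A 400 means the remote processed and rejected the transaction; the
# failed PDUs are logged but the destination is not scheduled for backoff.
assert not should_back_off(400)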
@@ -54,6 +54,28 @@ class TransportLayerClient(object):
            destination, path=path, args={"event_id": event_id},
        )

    @log_function
    def get_room_state_ids(self, destination, room_id, event_id):
        """ Requests all state for a given room from the given server at the
        given event. Returns the state's event_id's

        Args:
            destination (str): The host name of the remote home server we want
                to get the state from.
            room_id (str): The room we want the state of
            event_id (str): The event we want the context at.

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_room_state_ids dest=%s, room=%s",
                     destination, room_id)

        path = PREFIX + "/state_ids/%s/" % room_id
        return self.client.get_json(
            destination, path=path, args={"event_id": event_id},
        )

    @log_function
    def get_event(self, destination, event_id, timeout=None):
        """ Requests the pdu with the given id and origin from the given server.
@@ -226,12 +248,27 @@ class TransportLayerClient(object):

    @defer.inlineCallbacks
    @log_function
    def get_public_rooms(self, remote_server):
    def get_public_rooms(self, remote_server, limit, since_token,
                         search_filter=None, include_all_networks=False,
                         third_party_instance_id=None):
        path = PREFIX + "/publicRooms"

        args = {
            "include_all_networks": "true" if include_all_networks else "false",
        }
        if third_party_instance_id:
            args["third_party_instance_id"] = (third_party_instance_id,)
        if limit:
            args["limit"] = [str(limit)]
        if since_token:
            args["since"] = [since_token]

        # TODO(erikj): Actually send the search_filter across federation.

        response = yield self.client.get_json(
            destination=remote_server,
            path=path,
            args=args,
        )

        defer.returnValue(response)
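Note the shape of args here: Twisted exposes query parameters as sequences of values, so each entry is a list or one-element tuple, mirroring what request.args looks like on the receiving side. A hedged sketch of how such an args dict serialises into the query string for GET /publicRooms (values illustrative):

# Sketch: serialising a Twisted-style args dict (values are sequences)
# into a query string. doseq=True expands each sequence into repeated
# key=value pairs.
try:
    from urllib.parse import urlencode        # Python 3
except ImportError:
    from urllib import urlencode              # Python 2

args = {
    "include_all_networks": ["false"],
    "limit": ["10"],
    "since": ["t123"],
}
query_string = urlencode(sorted(args.items()), doseq=True)
assert query_string == "include_all_networks=false&limit=10&since=t123"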
@@ -276,7 +313,7 @@ class TransportLayerClient(object):

    @defer.inlineCallbacks
    @log_function
    def query_client_keys(self, destination, query_content):
    def query_client_keys(self, destination, query_content, timeout):
        """Query the device keys for a list of user ids hosted on a remote
        server.

@@ -305,12 +342,39 @@ class TransportLayerClient(object):
            destination=destination,
            path=path,
            data=query_content,
            timeout=timeout,
        )
        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def claim_client_keys(self, destination, query_content):
    def query_user_devices(self, destination, user_id, timeout):
        """Query the devices for a user id hosted on a remote server.

        Response:
            {
              "stream_id": "...",
              "devices": [ { ... } ]
            }

        Args:
            destination(str): The server to query.
            user_id(str): The user id to query for devices.
        Returns:
            A dict containing the device keys.
        """
        path = PREFIX + "/user/devices/" + user_id

        content = yield self.client.get_json(
            destination=destination,
            path=path,
            timeout=timeout,
        )
        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def claim_client_keys(self, destination, query_content, timeout):
        """Claim one-time keys for a list of devices hosted on a remote server.

        Request:
@@ -341,13 +405,14 @@ class TransportLayerClient(object):
            destination=destination,
            path=path,
            data=query_content,
            timeout=timeout,
        )
        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def get_missing_events(self, destination, room_id, earliest_events,
                           latest_events, limit, min_depth):
                           latest_events, limit, min_depth, timeout):
        path = PREFIX + "/get_missing_events/%s" % (room_id,)

        content = yield self.client.post_json(
@@ -358,7 +423,8 @@ class TransportLayerClient(object):
                "min_depth": int(min_depth),
                "earliest_events": earliest_events,
                "latest_events": latest_events,
            }
            },
            timeout=timeout,
        )

        defer.returnValue(content)

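For reference, the body POSTed to /get_missing_events is just the fields assembled in the hunk above, assuming (as the surrounding code suggests) a limit field alongside min_depth and the two event-id lists. A minimal sketch of constructing it, with illustrative values:

import json

def missing_events_body(earliest_events, latest_events, limit, min_depth):
    # Shape of the POST body for /get_missing_events/<room_id>; field
    # names follow the hunk above, the limit field is assumed.
    return {
        "limit": int(limit),
        "min_depth": int(min_depth),
        "earliest_events": earliest_events,
        "latest_events": latest_events,
    }

body = missing_events_body(["$a:hs"], ["$z:hs"], limit=10, min_depth=0)
print(json.dumps(body, sort_keys=True))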
@@ -18,13 +18,18 @@ from twisted.internet import defer
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import JsonResource
from synapse.http.servlet import parse_json_object_from_request, parse_string
from synapse.http.servlet import (
    parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
    parse_boolean_from_args,
)
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
from synapse.types import ThirdPartyInstanceID

import functools
import logging
import simplejson as json
import re
import synapse


logger = logging.getLogger(__name__)
@@ -60,6 +65,16 @@ class TransportLayerServer(JsonResource):
        )


class AuthenticationError(SynapseError):
    """There was a problem authenticating the request"""
    pass


class NoAuthenticationError(AuthenticationError):
    """The request had no authentication information"""
    pass


class Authenticator(object):
    def __init__(self, hs):
        self.keyring = hs.get_keyring()
@@ -67,7 +82,7 @@ class Authenticator(object):

    # A method just so we can pass 'self' as the authenticator to the Servlets
    @defer.inlineCallbacks
    def authenticate_request(self, request):
    def authenticate_request(self, request, content):
        json_request = {
            "method": request.method,
            "uri": request.uri,
@@ -75,17 +90,10 @@ class Authenticator(object):
            "signatures": {},
        }

        content = None
        origin = None
        if content is not None:
            json_request["content"] = content

        if request.method in ["PUT", "POST"]:
            # TODO: Handle other method types? other content types?
            try:
                content_bytes = request.content.read()
                content = json.loads(content_bytes)
                json_request["content"] = content
            except:
                raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)
        origin = None

        def parse_auth_header(header_str):
            try:
@@ -103,14 +111,14 @@ class Authenticator(object):
                sig = strip_quotes(param_dict["sig"])
                return (origin, key, sig)
            except:
                raise SynapseError(
                raise AuthenticationError(
                    400, "Malformed Authorization header", Codes.UNAUTHORIZED
                )

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

        if not auth_headers:
            raise SynapseError(
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

@@ -121,7 +129,7 @@ class Authenticator(object):
            json_request["signatures"].setdefault(origin, {})[key] = sig

        if not json_request["signatures"]:
            raise SynapseError(
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

@@ -130,10 +138,12 @@ class Authenticator(object):
        logger.info("Request from %s", origin)
        request.authenticated_entity = origin

        defer.returnValue((origin, content))
        defer.returnValue(origin)


class BaseFederationServlet(object):
    REQUIRE_AUTH = True

    def __init__(self, handler, authenticator, ratelimiter, server_name,
                 room_list_handler):
        self.handler = handler
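The Authorization headers being checked here carry X-Matrix signatures of the form `X-Matrix origin=origin.example,key="ed25519:1",sig="base64..."`. A hedged sketch of the origin/key/sig extraction that parse_auth_header performs (helper name and quoting rules simplified):

def parse_x_matrix(header_str):
    """Split an 'X-Matrix origin=...,key="...",sig="..."' header into
    (origin, key, sig). A simplified sketch of parse_auth_header."""
    scheme, _, params = header_str.partition(" ")
    if scheme != "X-Matrix":
        raise ValueError("Malformed Authorization header")
    param_dict = dict(kv.split("=", 1) for kv in params.split(","))

    def strip_quotes(value):
        return value[1:-1] if value.startswith('"') else value

    origin = strip_quotes(param_dict["origin"])
    key = strip_quotes(param_dict["key"])
    sig = strip_quotes(param_dict["sig"])
    return origin, key, sig

assert parse_x_matrix(
    'X-Matrix origin=remote.example,key="ed25519:1",sig="dGVzdA"'
) == ("remote.example", "ed25519:1", "dGVzdA")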
@@ -141,29 +151,46 @@ class BaseFederationServlet(object):
        self.ratelimiter = ratelimiter
        self.room_list_handler = room_list_handler

    def _wrap(self, code):
    def _wrap(self, func):
        authenticator = self.authenticator
        ratelimiter = self.ratelimiter

        @defer.inlineCallbacks
        @functools.wraps(code)
        def new_code(request, *args, **kwargs):
        @functools.wraps(func)
        def new_func(request, *args, **kwargs):
            content = None
            if request.method in ["PUT", "POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)

            try:
                (origin, content) = yield authenticator.authenticate_request(request)
                with ratelimiter.ratelimit(origin) as d:
                    yield d
                    response = yield code(
                        origin, content, request.args, *args, **kwargs
                    )
                origin = yield authenticator.authenticate_request(request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.exception("authenticate_request failed")
                    raise
            except:
                logger.exception("authenticate_request failed")
                raise

            if origin:
                with ratelimiter.ratelimit(origin) as d:
                    yield d
                    response = yield func(
                        origin, content, request.args, *args, **kwargs
                    )
            else:
                response = yield func(
                    origin, content, request.args, *args, **kwargs
                )

            defer.returnValue(response)

        # Extra logic that functools.wraps() doesn't finish
        new_code.__self__ = code.__self__
        new_func.__self__ = func.__self__

        return new_code
        return new_func

    def register(self, server):
        pattern = re.compile("^" + PREFIX + self.PATH + "$")
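The reworked _wrap is essentially an authenticate-then-ratelimit decorator: parse the body once, resolve the origin (optional when REQUIRE_AUTH is false), and only then dispatch. A condensed, framework-free sketch of the same layering, with hypothetical names and a plain dict standing in for the request:

import functools

def wrap(func, authenticate, require_auth=True):
    """Sketch of BaseFederationServlet._wrap: auth first, then dispatch.
    `authenticate` returns an origin, or raises LookupError when the
    request carries no auth at all (standing in for NoAuthenticationError)."""
    @functools.wraps(func)
    def new_func(request, *args, **kwargs):
        content = request.get("content")  # body parsed exactly once
        try:
            origin = authenticate(request, content)
        except LookupError:
            if require_auth:
                raise
            origin = None  # anonymous access allowed for this servlet
        return func(origin, content, request.get("query"), *args, **kwargs)
    return new_func

handler = wrap(
    lambda origin, content, query: (200, {"origin": origin}),
    authenticate=lambda request, content: "remote.example",
)
assert handler({"content": {}, "query": {}}) == (200, {"origin": "remote.example"})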
@@ -271,6 +298,17 @@ class FederationStateServlet(BaseFederationServlet):
        )


class FederationStateIdsServlet(BaseFederationServlet):
    PATH = "/state_ids/(?P<room_id>[^/]*)/"

    def on_GET(self, origin, content, query, room_id):
        return self.handler.on_state_ids_request(
            origin,
            room_id,
            query.get("event_id", [None])[0],
        )


class FederationBackfillServlet(BaseFederationServlet):
    PATH = "/backfill/(?P<context>[^/]*)/"

@@ -367,10 +405,15 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
class FederationClientKeysQueryServlet(BaseFederationServlet):
    PATH = "/user/keys/query"

    @defer.inlineCallbacks
    def on_POST(self, origin, content, query):
        response = yield self.handler.on_query_client_keys(origin, content)
        defer.returnValue((200, response))
        return self.handler.on_query_client_keys(origin, content)


class FederationUserDevicesQueryServlet(BaseFederationServlet):
    PATH = "/user/devices/(?P<user_id>[^/]*)"

    def on_GET(self, origin, content, query, user_id):
        return self.handler.on_query_user_devices(origin, user_id)


class FederationClientKeysClaimServlet(BaseFederationServlet):
@@ -420,9 +463,10 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
class On3pidBindServlet(BaseFederationServlet):
    PATH = "/3pid/onbind"

    REQUIRE_AUTH = False

    @defer.inlineCallbacks
    def on_POST(self, request):
        content = parse_json_object_from_request(request)
    def on_POST(self, origin, content, query):
        if "invites" in content:
            last_exception = None
            for invite in content["invites"]:
@@ -444,11 +488,6 @@ class On3pidBindServlet(BaseFederationServlet):
                raise last_exception
        defer.returnValue((200, {}))

    # Avoid doing remote HS authorization checks which are done by default by
    # BaseFederationServlet.
    def _wrap(self, code):
        return code


class OpenIdUserInfo(BaseFederationServlet):
    """
@@ -469,9 +508,11 @@ class OpenIdUserInfo(BaseFederationServlet):

    PATH = "/openid/userinfo"

    REQUIRE_AUTH = False

    @defer.inlineCallbacks
    def on_GET(self, request):
        token = parse_string(request, "access_token")
    def on_GET(self, origin, content, query):
        token = query.get("access_token", [None])[0]
        if token is None:
            defer.returnValue((401, {
                "errcode": "M_MISSING_TOKEN", "error": "Access Token required"
@@ -488,11 +529,6 @@ class OpenIdUserInfo(BaseFederationServlet):

        defer.returnValue((200, {"sub": user_id}))

    # Avoid doing remote HS authorization checks which are done by default by
    # BaseFederationServlet.
    def _wrap(self, code):
        return code


class PublicRoomList(BaseFederationServlet):
    """
@@ -529,15 +565,49 @@ class PublicRoomList(BaseFederationServlet):

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query):
        data = yield self.room_list_handler.get_local_public_room_list()
        limit = parse_integer_from_args(query, "limit", 0)
        since_token = parse_string_from_args(query, "since", None)
        include_all_networks = parse_boolean_from_args(
            query, "include_all_networks", False
        )
        third_party_instance_id = parse_string_from_args(
            query, "third_party_instance_id", None
        )

        if include_all_networks:
            network_tuple = None
        elif third_party_instance_id:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
        else:
            network_tuple = ThirdPartyInstanceID(None, None)

        data = yield self.room_list_handler.get_local_public_room_list(
            limit, since_token,
            network_tuple=network_tuple
        )
        defer.returnValue((200, data))


class FederationVersionServlet(BaseFederationServlet):
    PATH = "/version"

    REQUIRE_AUTH = False

    def on_GET(self, origin, content, query):
        return defer.succeed((200, {
            "server": {
                "name": "Synapse",
                "version": get_version_string(synapse)
            },
        }))


SERVLET_CLASSES = (
    FederationSendServlet,
    FederationPullServlet,
    FederationEventServlet,
    FederationStateServlet,
    FederationStateIdsServlet,
    FederationBackfillServlet,
    FederationQueryServlet,
    FederationMakeJoinServlet,
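The three-way branch above maps query parameters onto a network filter: include_all_networks clears the filter entirely, an explicit third_party_instance_id selects one bridge instance, and the default (None, None) tuple means the plain local room list. A hedged sketch of the same selection, with an ordinary namedtuple in place of ThirdPartyInstanceID and an assumed "appservice_id|network_id" string format:

from collections import namedtuple

# Stand-in for synapse.types.ThirdPartyInstanceID.
InstanceID = namedtuple("InstanceID", ["appservice_id", "network_id"])

def pick_network_tuple(include_all_networks, third_party_instance_id):
    if include_all_networks:
        return None                      # no filtering at all
    if third_party_instance_id:
        # Assumed serialisation: "appservice_id|network_id".
        appservice_id, network_id = third_party_instance_id.split("|", 1)
        return InstanceID(appservice_id, network_id)
    return InstanceID(None, None)        # the plain local room list

assert pick_network_tuple(True, None) is None
assert pick_network_tuple(False, None) == InstanceID(None, None)
assert pick_network_tuple(False, "as1|irc") == InstanceID("as1", "irc")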
@@ -550,11 +620,13 @@ SERVLET_CLASSES = (
    FederationGetMissingEventsServlet,
    FederationEventAuthServlet,
    FederationClientKeysQueryServlet,
    FederationUserDevicesQueryServlet,
    FederationClientKeysClaimServlet,
    FederationThirdPartyInviteExchangeServlet,
    On3pidBindServlet,
    OpenIdUserInfo,
    PublicRoomList,
    FederationVersionServlet,
)


@@ -19,22 +19,31 @@ from .room import (
)
from .room_member import RoomMemberHandler
from .message import MessageHandler
from .events import EventStreamHandler, EventHandler
from .federation import FederationHandler
from .profile import ProfileHandler
from .directory import DirectoryHandler
from .admin import AdminHandler
from .identity import IdentityHandler
from .receipts import ReceiptsHandler
from .search import SearchHandler


class Handlers(object):

    """ A collection of all the event handlers.
    """ Deprecated. A collection of handlers.

    There's no need to lazily create these; we'll just make them all eagerly
    at construction time.
    At some point most of the classes whose name ended "Handler" were
    accessed through this class.

    However this makes it painful to unit test the handlers and to run cut
    down versions of synapse that only use specific handlers because using a
    single handler required creating all of the handlers. So some of the
    handlers have been lifted out of the Handlers object and are now accessed
    directly through the homeserver object itself.

    Any new handlers should follow the new pattern of being accessed through
    the homeserver object and should not be added to the Handlers object.

    The remaining handlers should be moved out of the handlers object.
    """

    def __init__(self, hs):
@@ -42,13 +51,10 @@ class Handlers(object):
        self.message_handler = MessageHandler(hs)
        self.room_creation_handler = RoomCreationHandler(hs)
        self.room_member_handler = RoomMemberHandler(hs)
        self.event_stream_handler = EventStreamHandler(hs)
        self.event_handler = EventHandler(hs)
        self.federation_handler = FederationHandler(hs)
        self.profile_handler = ProfileHandler(hs)
        self.directory_handler = DirectoryHandler(hs)
        self.admin_handler = AdminHandler(hs)
        self.receipts_handler = ReceiptsHandler(hs)
        self.identity_handler = IdentityHandler(hs)
        self.search_handler = SearchHandler(hs)
        self.room_context_handler = RoomContextHandler(hs)
@@ -13,14 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.errors import LimitExceededError
import synapse.types
from synapse.api.constants import Membership, EventTypes
from synapse.types import UserID, Requester


import logging
from synapse.api.errors import LimitExceededError
from synapse.types import UserID


logger = logging.getLogger(__name__)
@@ -31,11 +31,15 @@ class BaseHandler(object):
    Common base class for the event handlers.

    Attributes:
        store (synapse.storage.events.StateStore):
        store (synapse.storage.DataStore):
        state_handler (synapse.state.StateHandler):
    """

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer):
        """
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()
        self.notifier = hs.get_notifier()
@@ -51,8 +55,20 @@ class BaseHandler(object):

    def ratelimit(self, requester):
        time_now = self.clock.time()
        user_id = requester.user.to_string()

        # The AS user itself is never rate limited.
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service is not None:
            return  # do not ratelimit app service senders

        # Disable rate limiting of users belonging to any AS that is configured
        # not to be rate limited in its registration file (rate_limited: true|false).
        if requester.app_service and not requester.app_service.is_rate_limited():
            return

        allowed, time_allowed = self.ratelimiter.send_message(
            requester.user.to_string(), time_now,
            user_id, time_now,
            msg_rate_hz=self.hs.config.rc_messages_per_second,
            burst_count=self.hs.config.rc_message_burst_count,
        )
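The ratelimit changes add two exemptions in front of the token-bucket check: the AS sender user itself, and any user belonging to an AS registered with rate_limited: false. A compact sketch of that guard ordering, with hypothetical store and app-service objects:

def should_ratelimit(user_id, store, requester_app_service):
    """Sketch of the exemption checks that now precede the send_message
    token-bucket test in BaseHandler.ratelimit."""
    # 1. The AS user itself is never rate limited.
    if store.get_app_service_by_user_id(user_id) is not None:
        return False
    # 2. Users belonging to an AS with `rate_limited: false` in its
    #    registration file are exempt too.
    if requester_app_service and not requester_app_service.is_rate_limited():
        return False
    return True  # fall through to the normal token-bucket check

class FakeStore(object):
    def get_app_service_by_user_id(self, user_id):
        return {"id": "irc-bridge"} if user_id == "@irc:hs" else None

assert should_ratelimit("@irc:hs", FakeStore(), None) is False
assert should_ratelimit("@alice:hs", FakeStore(), None) is True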
@@ -61,33 +77,25 @@ class BaseHandler(object):
                retry_after_ms=int(1000 * (time_allowed - time_now)),
            )

    def is_host_in_room(self, current_state):
        room_members = [
            (state_key, event.membership)
            for ((event_type, state_key), event) in current_state.items()
            if event_type == EventTypes.Member
        ]
        if len(room_members) == 0:
            # Have we just created the room, and is this about to be the very
            # first member event?
            create_event = current_state.get(("m.room.create", ""))
            if create_event:
                return True
        for (state_key, membership) in room_members:
            if (
                self.hs.is_mine_id(state_key)
                and membership == Membership.JOIN
            ):
                return True
        return False

    @defer.inlineCallbacks
    def maybe_kick_guest_users(self, event, current_state):
    def maybe_kick_guest_users(self, event, context=None):
        # Technically this function invalidates current_state by changing it.
        # Hopefully this isn't that important to the caller.
        if event.type == EventTypes.GuestAccess:
            guest_access = event.content.get("guest_access", "forbidden")
            if guest_access != "can_join":
                if context:
                    current_state = yield self.store.get_events(
                        context.current_state_ids.values()
                    )
                else:
                    current_state = yield self.state_handler.get_current_state(
                        event.room_id
                    )

                current_state = current_state.values()

                logger.info("maybe_kick_guest_users %r", current_state)
                yield self.kick_guest_users(current_state)

    @defer.inlineCallbacks
@@ -120,7 +128,8 @@ class BaseHandler(object):
            # and having homeservers have their own users leave keeps more
            # of that decision-making and control local to the guest-having
            # homeserver.
            requester = Requester(target_user, "", True)
            requester = synapse.types.create_requester(
                target_user, is_guest=True)
            handler = self.hs.get_handlers().room_member_handler
            yield handler.update_membership(
                requester,
@@ -16,7 +16,8 @@
from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.appservice import ApplicationService
from synapse.util.metrics import Measure
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred

import logging

@@ -42,36 +43,73 @@ class ApplicationServicesHandler(object):
        self.appservice_api = hs.get_application_service_api()
        self.scheduler = hs.get_application_service_scheduler()
        self.started_scheduler = False
        self.clock = hs.get_clock()
        self.notify_appservices = hs.config.notify_appservices

        self.current_max = 0
        self.is_processing = False

    @defer.inlineCallbacks
    def notify_interested_services(self, event):
    def notify_interested_services(self, current_id):
        """Notifies (pushes) all application services interested in this event.

        Pushing is done asynchronously, so this method won't block for any
        prolonged length of time.

        Args:
            event(Event): The event to push out to interested services.
            current_id(int): The current maximum ID.
        """
        # Gather interested services
        services = yield self._get_services_for_event(event)
        if len(services) == 0:
            return  # no services need notifying
        services = self.store.get_app_services()
        if not services or not self.notify_appservices:
            return

        # Do we know this user exists? If not, poke the user query API for
        # all services which match that user regex. This needs to block as these
        # user queries need to be made BEFORE pushing the event.
        yield self._check_user_exists(event.sender)
        if event.type == EventTypes.Member:
            yield self._check_user_exists(event.state_key)
        self.current_max = max(self.current_max, current_id)
        if self.is_processing:
            return

        if not self.started_scheduler:
            self.scheduler.start().addErrback(log_failure)
            self.started_scheduler = True
        with Measure(self.clock, "notify_interested_services"):
            self.is_processing = True
            try:
                upper_bound = self.current_max
                limit = 100
                while True:
                    upper_bound, events = yield self.store.get_new_events_for_appservice(
                        upper_bound, limit
                    )

        # Fork off pushes to these services
        for service in services:
            self.scheduler.submit_event_for_as(service, event)
                    if not events:
                        break

                    for event in events:
                        # Gather interested services
                        services = yield self._get_services_for_event(event)
                        if len(services) == 0:
                            continue  # no services need notifying

                        # Do we know this user exists? If not, poke the user
                        # query API for all services which match that user regex.
                        # This needs to block as these user queries need to be
                        # made BEFORE pushing the event.
                        yield self._check_user_exists(event.sender)
                        if event.type == EventTypes.Member:
                            yield self._check_user_exists(event.state_key)

                        if not self.started_scheduler:
                            self.scheduler.start().addErrback(log_failure)
                            self.started_scheduler = True

                        # Fork off pushes to these services
                        for service in services:
                            preserve_fn(self.scheduler.submit_event_for_as)(
                                service, event
                            )

                    yield self.store.set_appservice_last_pos(upper_bound)

                    if len(events) < limit:
                        break
            finally:
                self.is_processing = False

    @defer.inlineCallbacks
    def query_user_exists(self, user_id):
@@ -104,11 +142,12 @@ class ApplicationServicesHandler(object):
            association can be found.
        """
        room_alias_str = room_alias.to_string()
        alias_query_services = yield self._get_services_for_event(
            event=None,
            restrict_to=ApplicationService.NS_ALIASES,
            alias_list=[room_alias_str]
        )
        services = self.store.get_app_services()
        alias_query_services = [
            s for s in services if (
                s.is_interested_in_alias(room_alias_str)
            )
        ]
        for alias_service in alias_query_services:
            is_known_alias = yield self.appservice_api.query_alias(
                alias_service, room_alias_str
@@ -121,47 +160,93 @@ class ApplicationServicesHandler(object):
        defer.returnValue(result)

    @defer.inlineCallbacks
    def _get_services_for_event(self, event, restrict_to="", alias_list=None):
    def query_3pe(self, kind, protocol, fields):
        services = yield self._get_services_for_3pn(protocol)

        results = yield preserve_context_over_deferred(defer.DeferredList([
            preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields)
            for service in services
        ], consumeErrors=True))

        ret = []
        for (success, result) in results:
            if success:
                ret.extend(result)

        defer.returnValue(ret)

    @defer.inlineCallbacks
    def get_3pe_protocols(self, only_protocol=None):
        services = self.store.get_app_services()
        protocols = {}

        # Collect up all the individual protocol responses out of the ASes
        for s in services:
            for p in s.protocols:
                if only_protocol is not None and p != only_protocol:
                    continue

                if p not in protocols:
                    protocols[p] = []

                info = yield self.appservice_api.get_3pe_protocol(s, p)

                if info is not None:
                    protocols[p].append(info)

        def _merge_instances(infos):
            if not infos:
                return {}

            # Merge the 'instances' lists of multiple results, but just take
            # the other fields from the first as they ought to be identical
            # copy the result so as not to corrupt the cached one
            combined = dict(infos[0])
            combined["instances"] = list(combined["instances"])

            for info in infos[1:]:
                combined["instances"].extend(info["instances"])

            return combined

        for p in protocols.keys():
            protocols[p] = _merge_instances(protocols[p])

        defer.returnValue(protocols)

    @defer.inlineCallbacks
    def _get_services_for_event(self, event):
        """Retrieve a list of application services interested in this event.

        Args:
            event(Event): The event to check. Can be None if alias_list is not.
            restrict_to(str): The namespace to restrict regex tests to.
            alias_list: A list of aliases to get services for. If None, this
                list is obtained from the database.
        Returns:
            list<ApplicationService>: A list of services interested in this
                event based on the service regex.
        """
        member_list = None
        if hasattr(event, "room_id"):
            # We need to know the aliases associated with this event.room_id,
            # if any.
            if not alias_list:
                alias_list = yield self.store.get_aliases_for_room(
                    event.room_id
                )
            # We need to know the members associated with this event.room_id,
            # if any.
            member_list = yield self.store.get_users_in_room(event.room_id)

        services = yield self.store.get_app_services()
        services = self.store.get_app_services()
        interested_list = [
            s for s in services if (
                s.is_interested(event, restrict_to, alias_list, member_list)
                yield s.is_interested(event, self.store)
            )
        ]
        defer.returnValue(interested_list)

    @defer.inlineCallbacks
    def _get_services_for_user(self, user_id):
        services = yield self.store.get_app_services()
        services = self.store.get_app_services()
        interested_list = [
            s for s in services if (
                s.is_interested_in_user(user_id)
            )
        ]
        defer.returnValue(interested_list)
        return defer.succeed(interested_list)

    def _get_services_for_3pn(self, protocol):
        services = self.store.get_app_services()
        interested_list = [
            s for s in services if s.is_interested_in_protocol(protocol)
        ]
        return defer.succeed(interested_list)

    @defer.inlineCallbacks
    def _is_unknown_user(self, user_id):
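query_3pe fans the lookup out to every interested AS with a DeferredList and consumeErrors=True, then keeps only the successful results, so one broken bridge cannot fail the whole query. A minimal runnable sketch of that gather-and-filter step using Twisted directly:

from twisted.internet import defer

@defer.inlineCallbacks
def gather_3pe(queries):
    """Sketch of the query_3pe fan-out: run all lookups, swallow
    individual failures, and concatenate whatever succeeded."""
    results = yield defer.DeferredList(
        [defer.maybeDeferred(q) for q in queries],
        consumeErrors=True,  # a failed AS must not break the others
    )
    ret = []
    for success, result in results:
        if success:
            ret.extend(result)
    defer.returnValue(ret)

def broken():
    raise RuntimeError("bridge down")

def show(got):
    print(got)  # ['user-a', 'user-b']

d = gather_3pe([lambda: ["user-a"], broken, lambda: ["user-b"]])
d.addCallback(show)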
@@ -177,7 +262,7 @@ class ApplicationServicesHandler(object):
            return

        # user not found; could be the AS though, so check.
        services = yield self.store.get_app_services()
        services = self.store.get_app_services()
        service_list = [s for s in services if s.sender == user_id]
        defer.returnValue(len(service_list) == 0)


@@ -20,7 +20,6 @@ from synapse.api.constants import LoginType
from synapse.types import UserID
from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError
from synapse.util.async import run_on_reactor
from synapse.config.ldap import LDAPMode

from twisted.web.client import PartialDownloadError

@@ -29,12 +28,6 @@ import bcrypt
import pymacaroons
import simplejson

try:
    import ldap3
except ImportError:
    ldap3 = None
    pass

import synapse.util.stringutils as stringutils


@@ -45,6 +38,10 @@ class AuthHandler(BaseHandler):
    SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer):
        """
        super(AuthHandler, self).__init__(hs)
        self.checkers = {
            LoginType.PASSWORD: self._check_password_auth,
@@ -54,25 +51,21 @@
        }
        self.bcrypt_rounds = hs.config.bcrypt_rounds
        self.sessions = {}
        self.INVALID_TOKEN_HTTP_STATUS = 401

        self.ldap_enabled = hs.config.ldap_enabled
        if self.ldap_enabled:
            if not ldap3:
                raise RuntimeError(
                    'Missing ldap3 library. This is required for LDAP Authentication.'
                )
            self.ldap_mode = hs.config.ldap_mode
            self.ldap_uri = hs.config.ldap_uri
            self.ldap_start_tls = hs.config.ldap_start_tls
            self.ldap_base = hs.config.ldap_base
            self.ldap_filter = hs.config.ldap_filter
            self.ldap_attributes = hs.config.ldap_attributes
            if self.ldap_mode == LDAPMode.SEARCH:
                self.ldap_bind_dn = hs.config.ldap_bind_dn
                self.ldap_bind_password = hs.config.ldap_bind_password
        account_handler = _AccountHandler(
            hs, check_user_exists=self.check_user_exists
        )

        self.password_providers = [
            module(config=config, account_handler=account_handler)
            for module, config in hs.config.password_providers
        ]

        logger.info("Extra password_providers: %r", self.password_providers)

        self.hs = hs  # FIXME better possibility to access registrationHandler later?
        self.device_handler = hs.get_device_handler()
        self.macaroon_gen = hs.get_macaroon_generator()

    @defer.inlineCallbacks
    def check_auth(self, flows, clientdict, clientip):
@@ -143,21 +136,47 @@ class AuthHandler(BaseHandler):
        creds = session['creds']

        # check auth type currently being presented
        errordict = {}
        if 'type' in authdict:
            if authdict['type'] not in self.checkers:
            login_type = authdict['type']
            if login_type not in self.checkers:
                raise LoginError(400, "", Codes.UNRECOGNIZED)
            result = yield self.checkers[authdict['type']](authdict, clientip)
            if result:
                creds[authdict['type']] = result
                self._save_session(session)
            try:
                result = yield self.checkers[login_type](authdict, clientip)
                if result:
                    creds[login_type] = result
                    self._save_session(session)
            except LoginError as e:
                if login_type == LoginType.EMAIL_IDENTITY:
                    # riot used to have a bug where it would request a new
                    # validation token (thus sending a new email) each time it
                    # got a 401 with a 'flows' field.
                    # (https://github.com/vector-im/vector-web/issues/2447).
                    #
                    # Grandfather in the old behaviour for now to avoid
                    # breaking old riot deployments.
                    raise e

                # this step failed. Merge the error dict into the response
                # so that the client can have another go.
                errordict = e.error_dict()

        for f in flows:
            if len(set(f) - set(creds.keys())) == 0:
                logger.info("Auth completed with creds: %r", creds)
                # it's very useful to know what args are stored, but this can
                # include the password in the case of registering, so only log
                # the keys (confusingly, clientdict may contain a password
                # param, creds is just what the user authed as for UI auth
                # and is not sensitive).
                logger.info(
                    "Auth completed with creds: %r. Client dict has keys: %r",
                    creds, clientdict.keys()
                )
                defer.returnValue((True, creds, clientdict, session['id']))

        ret = self._auth_dict_for_flows(flows, session)
        ret['completed'] = creds.keys()
        ret.update(errordict)
        defer.returnValue((False, ret, clientdict, session['id']))

    @defer.inlineCallbacks
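A flow is complete once every stage it names appears among the creds the user has accumulated; that is all the set arithmetic `len(set(f) - set(creds.keys())) == 0` does. A tiny worked example:

def flow_completed(flow, creds):
    # A flow (list of login types) is satisfied when every stage has a
    # credential recorded, i.e. nothing in the flow is missing from creds.
    return len(set(flow) - set(creds.keys())) == 0

flows = [["m.login.recaptcha", "m.login.email.identity"], ["m.login.dummy"]]
creds = {"m.login.recaptcha": True}

assert not any(flow_completed(f, creds) for f in flows)
creds["m.login.email.identity"] = "alice@example.com"
assert any(flow_completed(f, creds) for f in flows)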
@@ -230,7 +249,6 @@ class AuthHandler(BaseHandler):
        sess = self._get_session_info(session_id)
        return sess.setdefault('serverdict', {}).get(key, default)

    @defer.inlineCallbacks
    def _check_password_auth(self, authdict, _):
        if "user" not in authdict or "password" not in authdict:
            raise LoginError(400, "", Codes.MISSING_PARAM)
@@ -240,11 +258,7 @@ class AuthHandler(BaseHandler):
        if not user_id.startswith('@'):
            user_id = UserID.create(user_id, self.hs.hostname).to_string()

        if not (yield self._check_password(user_id, password)):
            logger.warn("Failed password login for user %s", user_id)
            raise LoginError(403, "", errcode=Codes.FORBIDDEN)

        defer.returnValue(user_id)
        return self._check_password(user_id, password)

    @defer.inlineCallbacks
    def _check_recaptcha(self, authdict, clientip):
@@ -280,8 +294,17 @@ class AuthHandler(BaseHandler):
            data = pde.response
        resp_body = simplejson.loads(data)

        if 'success' in resp_body and resp_body['success']:
            defer.returnValue(True)
        if 'success' in resp_body:
            # Note that we do NOT check the hostname here: we explicitly
            # intend the CAPTCHA to be presented by whatever client the
            # user is using, we just care that they have completed a CAPTCHA.
            logger.info(
                "%s reCAPTCHA from hostname %s",
                "Successful" if resp_body['success'] else "Failed",
                resp_body.get('hostname')
            )
            if resp_body['success']:
                defer.returnValue(True)
        raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)

    @defer.inlineCallbacks
@@ -348,362 +371,185 @@ class AuthHandler(BaseHandler):
|
||||
|
||||
return self.sessions[session_id]
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def login_with_password(self, user_id, password):
|
||||
def validate_password_login(self, user_id, password):
|
||||
"""
|
||||
Authenticates the user with their username and password.
|
||||
|
||||
Used only by the v1 login API.
|
||||
|
||||
Args:
|
||||
user_id (str): User ID
|
||||
user_id (str): complete @user:id
|
||||
password (str): Password
|
||||
Returns:
|
||||
A tuple of:
|
||||
The user's ID.
|
||||
The access token for the user's session.
|
||||
The refresh token for the user's session.
|
||||
defer.Deferred: (str) canonical user id
|
||||
Raises:
|
||||
StoreError if there was a problem storing the token.
|
||||
StoreError if there was a problem accessing the database
|
||||
LoginError if there was an authentication problem.
|
||||
"""
|
||||
|
||||
if not (yield self._check_password(user_id, password)):
|
||||
logger.warn("Failed password login for user %s", user_id)
|
||||
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
|
||||
|
||||
logger.info("Logging in user %s", user_id)
|
||||
access_token = yield self.issue_access_token(user_id)
|
||||
refresh_token = yield self.issue_refresh_token(user_id)
|
||||
defer.returnValue((user_id, access_token, refresh_token))
|
||||
return self._check_password(user_id, password)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_login_tuple_for_user_id(self, user_id):
|
||||
def get_access_token_for_user_id(self, user_id, device_id=None,
|
||||
initial_display_name=None):
|
||||
"""
|
||||
Gets login tuple for the user with the given user ID.
|
||||
Creates a new access token for the user with the given user ID.
|
||||
|
||||
The user is assumed to have been authenticated by some other
|
||||
machanism (e.g. CAS)
|
||||
machanism (e.g. CAS), and the user_id converted to the canonical case.
|
||||
|
||||
The device will be recorded in the table if it is not there already.
|
||||
|
||||
Args:
|
||||
user_id (str): User ID
|
||||
user_id (str): canonical User ID
|
||||
device_id (str|None): the device ID to associate with the tokens.
|
||||
None to leave the tokens unassociated with a device (deprecated:
|
||||
we should always have a device ID)
|
||||
initial_display_name (str): display name to associate with the
|
||||
device if it needs re-registering
|
||||
Returns:
|
||||
A tuple of:
|
||||
The user's ID.
|
||||
The access token for the user's session.
|
||||
The refresh token for the user's session.
|
||||
Raises:
|
||||
StoreError if there was a problem storing the token.
|
||||
LoginError if there was an authentication problem.
|
||||
"""
|
||||
user_id, ignored = yield self._find_user_id_and_pwd_hash(user_id)
|
||||
logger.info("Logging in user %s on device %s", user_id, device_id)
|
||||
access_token = yield self.issue_access_token(user_id, device_id)
|
||||
|
||||
logger.info("Logging in user %s", user_id)
|
||||
access_token = yield self.issue_access_token(user_id)
|
||||
refresh_token = yield self.issue_refresh_token(user_id)
|
||||
defer.returnValue((user_id, access_token, refresh_token))
|
||||
# the device *should* have been registered before we got here; however,
|
||||
# it's possible we raced against a DELETE operation. The thing we
|
||||
# really don't want is active access_tokens without a record of the
|
||||
# device, so we double-check it here.
|
||||
if device_id is not None:
|
||||
yield self.device_handler.check_device_registered(
|
||||
user_id, device_id, initial_display_name
|
||||
)
|
||||
|
||||
defer.returnValue(access_token)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def does_user_exist(self, user_id):
|
||||
try:
|
||||
yield self._find_user_id_and_pwd_hash(user_id)
|
||||
defer.returnValue(True)
|
||||
except LoginError:
|
||||
defer.returnValue(False)
|
||||
def check_user_exists(self, user_id):
|
||||
"""
|
||||
Checks to see if a user with the given id exists. Will check case
|
||||
insensitively, but return None if there are multiple inexact matches.
|
||||
|
||||
Args:
|
||||
(str) user_id: complete @user:id
|
||||
|
||||
Returns:
|
||||
defer.Deferred: (str) canonical_user_id, or None if zero or
|
||||
multiple matches
|
||||
"""
|
||||
res = yield self._find_user_id_and_pwd_hash(user_id)
|
||||
if res is not None:
|
||||
defer.returnValue(res[0])
|
||||
defer.returnValue(None)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _find_user_id_and_pwd_hash(self, user_id):
|
||||
"""Checks to see if a user with the given id exists. Will check case
|
||||
insensitively, but will throw if there are multiple inexact matches.
|
||||
insensitively, but will return None if there are multiple inexact
|
||||
matches.
|
||||
|
||||
Returns:
|
||||
tuple: A 2-tuple of `(canonical_user_id, password_hash)`
|
||||
None: if there is not exactly one match
|
||||
"""
|
||||
user_infos = yield self.store.get_users_by_id_case_insensitive(user_id)
|
||||
|
||||
result = None
|
||||
if not user_infos:
|
||||
logger.warn("Attempted to login as %s but they do not exist", user_id)
|
||||
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
|
||||
|
||||
if len(user_infos) > 1:
|
||||
if user_id not in user_infos:
|
||||
logger.warn(
|
||||
"Attempted to login as %s but it matches more than one user "
|
||||
"inexactly: %r",
|
||||
user_id, user_infos.keys()
|
||||
)
|
||||
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
|
||||
|
||||
defer.returnValue((user_id, user_infos[user_id]))
|
||||
elif len(user_infos) == 1:
|
||||
# a single match (possibly not exact)
|
||||
result = user_infos.popitem()
|
||||
elif user_id in user_infos:
|
||||
# multiple matches, but one is exact
|
||||
result = (user_id, user_infos[user_id])
|
||||
else:
|
||||
defer.returnValue(user_infos.popitem())
|
||||
# multiple matches, none of them exact
|
||||
logger.warn(
|
||||
"Attempted to login as %s but it matches more than one user "
|
||||
"inexactly: %r",
|
||||
user_id, user_infos.keys()
|
||||
)
|
||||
defer.returnValue(result)

    @defer.inlineCallbacks
    def _check_password(self, user_id, password):
        """Authenticate a user against the LDAP and local databases.

        user_id is checked case insensitively against the local database, but
        will throw if there are multiple inexact matches.

        Args:
            user_id (str): complete @user:id
        Returns:
            (str) the canonical_user_id
        Raises:
            LoginError if login fails
        """
        for provider in self.password_providers:
            is_valid = yield provider.check_password(user_id, password)
            if is_valid:
                defer.returnValue(user_id)

        canonical_user_id = yield self._check_local_password(user_id, password)

        if canonical_user_id:
            defer.returnValue(canonical_user_id)

        # unknown username or invalid password. We raise a 403 here, but note
        # that if we're doing user-interactive login, it turns all LoginErrors
        # into a 401 anyway.
        raise LoginError(
            403, "Invalid password",
            errcode=Codes.FORBIDDEN
        )
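
    # The provider hook above only requires an object with a `check_password`
    # method returning a Deferred that fires with a truthy value on success.
    # A hypothetical in-memory provider, as a sketch (names illustrative):
    #
    #   from twisted.internet import defer
    #
    #   class DictPasswordProvider(object):
    #       def __init__(self, accounts):
    #           self._accounts = accounts  # dict: user_id -> password
    #
    #       def check_password(self, user_id, password):
    #           ok = self._accounts.get(user_id) == password
    #           return defer.succeed(ok)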

    @defer.inlineCallbacks
    def _check_local_password(self, user_id, password):
        """Authenticate a user against the local password database.

        user_id is checked case insensitively, but will return None if there
        are multiple inexact matches.

        Args:
            user_id (str): complete @user:id
        Returns:
            (str) the canonical_user_id, or None if unknown user / bad password
        """
        lookupres = yield self._find_user_id_and_pwd_hash(user_id)
        if not lookupres:
            defer.returnValue(None)
        (user_id, password_hash) = lookupres
        result = self.validate_hash(password, password_hash)
        if not result:
            logger.warn("Failed password login for user %s", user_id)
            defer.returnValue(None)
        defer.returnValue(user_id)

    @defer.inlineCallbacks
    def _check_ldap_password(self, user_id, password):
        """Attempt to authenticate a user against an LDAP Server
        and register an account if none exists.

        Returns:
            True if authentication against LDAP was successful
        """
        if not ldap3 or not self.ldap_enabled:
            defer.returnValue(False)

        if self.ldap_mode not in LDAPMode.LIST:
            raise RuntimeError(
                'Invalid ldap mode specified: {mode}'.format(
                    mode=self.ldap_mode
                )
            )

        try:
            server = ldap3.Server(self.ldap_uri)
            logger.debug(
                "Attempting ldap connection with %s",
                self.ldap_uri
            )

            localpart = UserID.from_string(user_id).localpart
            if self.ldap_mode == LDAPMode.SIMPLE:
                # bind with the local user's ldap credentials
                bind_dn = "{prop}={value},{base}".format(
                    prop=self.ldap_attributes['uid'],
                    value=localpart,
                    base=self.ldap_base
                )
                conn = ldap3.Connection(server, bind_dn, password)
                logger.debug(
                    "Established ldap connection in simple mode: %s",
                    conn
                )

                if self.ldap_start_tls:
                    conn.start_tls()
                    logger.debug(
                        "Upgraded ldap connection in simple mode through StartTLS: %s",
                        conn
                    )

                conn.bind()

            elif self.ldap_mode == LDAPMode.SEARCH:
                # connect with preconfigured credentials and search for local user
                conn = ldap3.Connection(
                    server,
                    self.ldap_bind_dn,
                    self.ldap_bind_password
                )
                logger.debug(
                    "Established ldap connection in search mode: %s",
                    conn
                )

                if self.ldap_start_tls:
                    conn.start_tls()
                    logger.debug(
                        "Upgraded ldap connection in search mode through StartTLS: %s",
                        conn
                    )

                conn.bind()

                # find matching dn
                query = "({prop}={value})".format(
                    prop=self.ldap_attributes['uid'],
                    value=localpart
                )
                if self.ldap_filter:
                    query = "(&{query}{filter})".format(
                        query=query,
                        filter=self.ldap_filter
                    )
                logger.debug("ldap search filter: %s", query)
                result = conn.search(self.ldap_base, query)

                if result and len(conn.response) == 1:
                    # found exactly one result
                    user_dn = conn.response[0]['dn']
                    logger.debug('ldap search found dn: %s', user_dn)

                    # unbind and reconnect, rebind with found dn
                    conn.unbind()
                    conn = ldap3.Connection(
                        server,
                        user_dn,
                        password,
                        auto_bind=True
                    )
                else:
                    # found 0 or > 1 results, abort!
                    logger.warn(
                        "ldap search returned unexpected (%d!=1) amount of results",
                        len(conn.response)
                    )
                    defer.returnValue(False)

            logger.info(
                "User authenticated against ldap server: %s",
                conn
            )

            # check for existing account, if none exists, create one
            if not (yield self.does_user_exist(user_id)):
                # query user metadata for account creation
                query = "({prop}={value})".format(
                    prop=self.ldap_attributes['uid'],
                    value=localpart
                )

                if self.ldap_mode == LDAPMode.SEARCH and self.ldap_filter:
                    query = "(&{filter}{user_filter})".format(
                        filter=query,
                        user_filter=self.ldap_filter
                    )
                logger.debug("ldap registration filter: %s", query)

                result = conn.search(
                    search_base=self.ldap_base,
                    search_filter=query,
                    attributes=[
                        self.ldap_attributes['name'],
                        self.ldap_attributes['mail']
                    ]
                )

                if len(conn.response) == 1:
                    attrs = conn.response[0]['attributes']
                    mail = attrs[self.ldap_attributes['mail']][0]
                    name = attrs[self.ldap_attributes['name']][0]

                    # create account
                    registration_handler = self.hs.get_handlers().registration_handler
                    user_id, access_token = (
                        yield registration_handler.register(localpart=localpart)
                    )

                    # TODO: bind email, set displayname with data from ldap directory

                    logger.info(
                        "ldap registration successful: %s: %s (%s, %s)",
                        user_id,
                        localpart,
                        name,
                        mail
                    )
                else:
                    logger.warn(
                        "ldap registration failed: unexpected (%d!=1) amount of results",
                        len(conn.response)
                    )
                    defer.returnValue(False)

            defer.returnValue(True)
        except ldap3.core.exceptions.LDAPException as e:
            logger.warn("Error during ldap authentication: %s", e)
            defer.returnValue(False)

    @defer.inlineCallbacks
    def issue_access_token(self, user_id, device_id=None):
        access_token = self.macaroon_gen.generate_access_token(user_id)
        yield self.store.add_access_token_to_user(user_id, access_token,
                                                  device_id)
        defer.returnValue(access_token)

    @defer.inlineCallbacks
    def issue_refresh_token(self, user_id):
        refresh_token = self.generate_refresh_token(user_id)
        yield self.store.add_refresh_token_to_user(user_id, refresh_token)
        defer.returnValue(refresh_token)

    def generate_access_token(self, user_id, extra_caveats=None):
        extra_caveats = extra_caveats or []
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = access")
        now = self.hs.get_clock().time_msec()
        expiry = now + (60 * 60 * 1000)
        macaroon.add_first_party_caveat("time < %d" % (expiry,))
        for caveat in extra_caveats:
            macaroon.add_first_party_caveat(caveat)
        return macaroon.serialize()

    def generate_refresh_token(self, user_id):
        m = self._generate_base_macaroon(user_id)
        m.add_first_party_caveat("type = refresh")
        # Important to add a nonce, because otherwise every refresh token for a
        # user will be the same.
        m.add_first_party_caveat("nonce = %s" % (
            stringutils.random_string_with_symbols(16),
        ))
        return m.serialize()

    def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = login")
        now = self.hs.get_clock().time_msec()
        expiry = now + duration_in_ms
        macaroon.add_first_party_caveat("time < %d" % (expiry,))
        return macaroon.serialize()

    def generate_delete_pusher_token(self, user_id):
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = delete_pusher")
        return macaroon.serialize()
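
    # All of the tokens above are macaroons: the base macaroon pins the server
    # name, a generation counter and the user_id, and each token type adds its
    # own caveats. A sketch of inspecting one with pymacaroons (values
    # illustrative):
    #
    #   import pymacaroons
    #
    #   m = pymacaroons.Macaroon.deserialize(serialized_token)
    #   for caveat in m.caveats:
    #       print(caveat.caveat_id)
    #   # gen = 1
    #   # user_id = @alice:example.com
    #   # type = login
    #   # time < 1483228800000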

    def validate_short_term_login_token_and_get_user_id(self, login_token):
        auth_api = self.hs.get_auth()
        try:
            macaroon = pymacaroons.Macaroon.deserialize(login_token)
            user_id = auth_api.get_user_id_from_macaroon(macaroon)
            auth_api.validate_macaroon(macaroon, "login", True, user_id)
            return user_id
        except Exception:
            raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)

    def _generate_base_macaroon(self, user_id):
        macaroon = pymacaroons.Macaroon(
            location=self.hs.config.server_name,
            identifier="key",
            key=self.hs.config.macaroon_secret_key)
        macaroon.add_first_party_caveat("gen = 1")
        macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
        return macaroon

    def get_user_from_macaroon(self, macaroon):
        user_prefix = "user_id = "
        for caveat in macaroon.caveats:
            if caveat.caveat_id.startswith(user_prefix):
                return caveat.caveat_id[len(user_prefix):]
        raise AuthError(
            self.INVALID_TOKEN_HTTP_STATUS, "No user_id found in token",
            errcode=Codes.UNKNOWN_TOKEN
        )

    @defer.inlineCallbacks
    def set_password(self, user_id, newpassword, requester=None):
        password_hash = self.hash(newpassword)

        except_access_token_id = requester.access_token_id if requester else None

        try:
            yield self.store.user_set_password_hash(user_id, password_hash)
@@ -712,19 +558,42 @@ class AuthHandler(BaseHandler):
                raise SynapseError(404, "Unknown user", Codes.NOT_FOUND)
            raise e
        yield self.store.user_delete_access_tokens(
            user_id, except_access_token_id
        )
        yield self.hs.get_pusherpool().remove_pushers_by_user(
            user_id, except_access_token_id
        )

    @defer.inlineCallbacks
    def add_threepid(self, user_id, medium, address, validated_at):
        # 'Canonicalise' email addresses down to lower case.
        # We're now moving towards the Home Server being the entity that
        # is responsible for validating threepids used for resetting passwords
        # on accounts, so in future Synapse will gain knowledge of specific
        # types (mediums) of threepid. For now, we still use the existing
        # infrastructure, but this is the start of synapse gaining knowledge
        # of specific types of threepid (and fixes the fact that checking
        # for the presence of an email address during password reset was
        # case sensitive).
        if medium == 'email':
            address = address.lower()

        yield self.store.user_add_threepid(
            user_id, medium, address, validated_at,
            self.hs.get_clock().time_msec()
        )

    @defer.inlineCallbacks
    def delete_threepid(self, user_id, medium, address):
        # 'Canonicalise' email addresses as per above
        if medium == 'email':
            address = address.lower()

        ret = yield self.store.user_delete_threepid(
            user_id, medium, address,
        )
        defer.returnValue(ret)

    def _save_session(self, session):
        # TODO: Persistent storage
        logger.debug("Saving session %s", session)
@@ -750,7 +619,7 @@ class AuthHandler(BaseHandler):
        Returns:
            Hashed password (str).
        """
        return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
                             bcrypt.gensalt(self.bcrypt_rounds))

    def validate_hash(self, password, stored_hash):
@@ -764,7 +633,76 @@ class AuthHandler(BaseHandler):
            Whether self.hash(password) == stored_hash (bool).
        """
        if stored_hash:
            return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
                                 stored_hash.encode('utf8')) == stored_hash
        else:
            return False
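
    # hash() and validate_hash() must agree on how the pepper is applied: it
    # is appended to the UTF-8 encoded password before bcrypt runs. A
    # standalone sketch (pepper value illustrative):
    #
    #   import bcrypt
    #
    #   pepper = b"myStaticPepper"
    #   stored = bcrypt.hashpw(u"s3cret".encode('utf8') + pepper,
    #                          bcrypt.gensalt(12))
    #   # verification re-hashes with the salt embedded in `stored`; a wrong
    #   # pepper fails exactly like a wrong password:
    #   assert bcrypt.hashpw(u"s3cret".encode('utf8') + pepper, stored) == stored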


class MacaroonGeneartor(object):
    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.server_name = hs.config.server_name
        self.macaroon_secret_key = hs.config.macaroon_secret_key

    def generate_access_token(self, user_id, extra_caveats=None):
        extra_caveats = extra_caveats or []
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = access")
        # Include a nonce, to make sure that each login gets a different
        # access token.
        macaroon.add_first_party_caveat("nonce = %s" % (
            stringutils.random_string_with_symbols(16),
        ))
        for caveat in extra_caveats:
            macaroon.add_first_party_caveat(caveat)
        return macaroon.serialize()

    def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)):
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = login")
        now = self.clock.time_msec()
        expiry = now + duration_in_ms
        macaroon.add_first_party_caveat("time < %d" % (expiry,))
        return macaroon.serialize()

    def generate_delete_pusher_token(self, user_id):
        macaroon = self._generate_base_macaroon(user_id)
        macaroon.add_first_party_caveat("type = delete_pusher")
        return macaroon.serialize()

    def _generate_base_macaroon(self, user_id):
        macaroon = pymacaroons.Macaroon(
            location=self.server_name,
            identifier="key",
            key=self.macaroon_secret_key)
        macaroon.add_first_party_caveat("gen = 1")
        macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
        return macaroon


class _AccountHandler(object):
    """A proxy object that gets passed to password auth providers so they
    can register new users etc if necessary.
    """
    def __init__(self, hs, check_user_exists):
        self.hs = hs

        self._check_user_exists = check_user_exists

    def check_user_exists(self, user_id):
        """Check if user exists.

        Returns:
            Deferred(bool)
        """
        return self._check_user_exists(user_id)

    def register(self, localpart):
        """Registers a new user with given localpart

        Returns:
            Deferred: a 2-tuple of (user_id, access_token)
        """
        reg = self.hs.get_handlers().registration_handler
        return reg.register(localpart=localpart)

synapse/handlers/device.py
@@ -0,0 +1,354 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.api import errors
from synapse.api.constants import EventTypes
from synapse.util import stringutils
from synapse.util.async import Linearizer
from synapse.types import get_domain_from_id, RoomStreamToken
from twisted.internet import defer
from ._base import BaseHandler

import logging

logger = logging.getLogger(__name__)


class DeviceHandler(BaseHandler):
    def __init__(self, hs):
        super(DeviceHandler, self).__init__(hs)

        self.hs = hs
        self.state = hs.get_state_handler()
        self.federation_sender = hs.get_federation_sender()
        self.federation = hs.get_replication_layer()
        self._remote_edu_linearizer = Linearizer(name="remote_device_list")

        self.federation.register_edu_handler(
            "m.device_list_update", self._incoming_device_list_update,
        )
        self.federation.register_query_handler(
            "user_devices", self.on_federation_query_user_devices,
        )

        hs.get_distributor().observe("user_left_room", self.user_left_room)

    @defer.inlineCallbacks
    def check_device_registered(self, user_id, device_id,
                                initial_device_display_name=None):
        """
        If the given device has not been registered, register it with the
        supplied display name.

        If no device_id is supplied, we make one up.

        Args:
            user_id (str): @user:id
            device_id (str | None): device id supplied by client
            initial_device_display_name (str | None): device display name from
                client
        Returns:
            str: device id (generated if none was supplied)
        """
        if device_id is not None:
            new_device = yield self.store.store_device(
                user_id=user_id,
                device_id=device_id,
                initial_device_display_name=initial_device_display_name,
            )
            if new_device:
                yield self.notify_device_update(user_id, [device_id])
            defer.returnValue(device_id)

        # if the device id is not specified, we'll autogen one, but loop a few
        # times in case of a clash.
        attempts = 0
        while attempts < 5:
            device_id = stringutils.random_string(10).upper()
            new_device = yield self.store.store_device(
                user_id=user_id,
                device_id=device_id,
                initial_device_display_name=initial_device_display_name,
            )
            if new_device:
                yield self.notify_device_update(user_id, [device_id])
                defer.returnValue(device_id)
            attempts += 1

        raise errors.StoreError(500, "Couldn't generate a device ID.")
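
    # The loop above retries because the autogenerated ID is short; a sketch
    # of an equivalent standalone generator (stringutils.random_string draws
    # from ASCII letters, so upper-casing gives IDs like "ABCDEFGHIJ"):
    #
    #   import random
    #   import string
    #
    #   def generate_device_id():
    #       return "".join(
    #           random.choice(string.ascii_letters) for _ in range(10)
    #       ).upper()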

    @defer.inlineCallbacks
    def get_devices_by_user(self, user_id):
        """
        Retrieve the given user's devices

        Args:
            user_id (str):
        Returns:
            defer.Deferred: list[dict[str, X]]: info on each device
        """

        device_map = yield self.store.get_devices_by_user(user_id)

        ips = yield self.store.get_last_client_ip_by_device(
            devices=((user_id, device_id) for device_id in device_map.keys())
        )

        devices = device_map.values()
        for device in devices:
            _update_device_from_client_ips(device, ips)

        defer.returnValue(devices)

    @defer.inlineCallbacks
    def get_device(self, user_id, device_id):
        """ Retrieve the given device

        Args:
            user_id (str):
            device_id (str):

        Returns:
            defer.Deferred: dict[str, X]: info on the device
        Raises:
            errors.NotFoundError: if the device was not found
        """
        try:
            device = yield self.store.get_device(user_id, device_id)
        except errors.StoreError:
            raise errors.NotFoundError
        ips = yield self.store.get_last_client_ip_by_device(
            devices=((user_id, device_id),)
        )
        _update_device_from_client_ips(device, ips)
        defer.returnValue(device)

    @defer.inlineCallbacks
    def delete_device(self, user_id, device_id):
        """ Delete the given device

        Args:
            user_id (str):
            device_id (str):

        Returns:
            defer.Deferred:
        """

        try:
            yield self.store.delete_device(user_id, device_id)
        except errors.StoreError as e:
            if e.code == 404:
                # no match
                pass
            else:
                raise

        yield self.store.user_delete_access_tokens(
            user_id, device_id=device_id,
            delete_refresh_tokens=True,
        )

        yield self.store.delete_e2e_keys_by_device(
            user_id=user_id, device_id=device_id
        )

        yield self.notify_device_update(user_id, [device_id])

    @defer.inlineCallbacks
    def update_device(self, user_id, device_id, content):
        """ Update the given device

        Args:
            user_id (str):
            device_id (str):
            content (dict): body of update request

        Returns:
            defer.Deferred:
        """

        try:
            yield self.store.update_device(
                user_id,
                device_id,
                new_display_name=content.get("display_name")
            )
            yield self.notify_device_update(user_id, [device_id])
        except errors.StoreError as e:
            if e.code == 404:
                raise errors.NotFoundError()
            else:
                raise

    @defer.inlineCallbacks
    def notify_device_update(self, user_id, device_ids):
        """Notify that a user's device(s) has changed. Pokes the notifier, and
        remote servers if the user is local.
        """
        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id
        )

        hosts = set()
        if self.hs.is_mine_id(user_id):
            hosts.update(get_domain_from_id(u) for u in users_who_share_room)
            hosts.discard(self.server_name)

        position = yield self.store.add_device_change_to_streams(
            user_id, device_ids, list(hosts)
        )

        rooms = yield self.store.get_rooms_for_user(user_id)
        room_ids = [r.room_id for r in rooms]

        yield self.notifier.on_new_event(
            "device_list_key", position, rooms=room_ids,
        )

        if hosts:
            logger.info("Sending device list update notif to: %r", hosts)
            for host in hosts:
                self.federation_sender.send_device_messages(host)

    @defer.inlineCallbacks
    def get_user_ids_changed(self, user_id, from_token):
        """Get list of users that have had the devices updated, or have newly
        joined a room, that `user_id` may be interested in.

        Args:
            user_id (str)
            from_token (StreamToken)
        """
        rooms = yield self.store.get_rooms_for_user(user_id)
        room_ids = set(r.room_id for r in rooms)

        # First we check if any devices have changed
        changed = yield self.store.get_user_whose_devices_changed(
            from_token.device_list_key
        )

        # Then work out if any users have since joined
        rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)

        possibly_changed = set(changed)
        for room_id in rooms_changed:
            # Fetch the current state at the time.
            stream_ordering = RoomStreamToken.parse_stream_token(from_token.room_key)

            try:
                event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_ordering=stream_ordering
                )
                prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
            except:
                prev_state_ids = {}

            current_state_ids = yield self.state.get_current_state_ids(room_id)

            # If there has been any change in membership, include them in the
            # possibly changed list. We'll check if they are joined below,
            # and we're not too worried about spuriously adding users.
            for key, event_id in current_state_ids.iteritems():
                etype, state_key = key
                if etype == EventTypes.Member:
                    prev_event_id = prev_state_ids.get(key, None)
                    if not prev_event_id or prev_event_id != event_id:
                        possibly_changed.add(state_key)

        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id
        )

        # Take the intersection of the users whose devices may have changed
        # and those that actually still share a room with the user
        defer.returnValue(users_who_share_room & possibly_changed)

    @defer.inlineCallbacks
    def _incoming_device_list_update(self, origin, edu_content):
        user_id = edu_content["user_id"]
        device_id = edu_content["device_id"]
        stream_id = edu_content["stream_id"]
        prev_ids = edu_content.get("prev_id", [])

        if get_domain_from_id(user_id) != origin:
            # TODO: Raise?
            logger.warning("Got device list update edu for %r from %r", user_id, origin)
            return

        rooms = yield self.store.get_rooms_for_user(user_id)
        if not rooms:
            # We don't share any rooms with this user. Ignore update, as we
            # probably won't get any further updates.
            return

        with (yield self._remote_edu_linearizer.queue(user_id)):
            # If the prev id matches what's in our cache table, then we don't
            # need to resync the user's device list, otherwise we do.
            resync = True
            if len(prev_ids) == 1:
                extremity = yield self.store.get_device_list_last_stream_id_for_remote(
                    user_id
                )
                logger.info("Extrem: %r, prev_ids: %r", extremity, prev_ids)
                if str(extremity) == str(prev_ids[0]):
                    resync = False

            if resync:
                # Fetch all devices for the user.
                result = yield self.federation.query_user_devices(origin, user_id)
                stream_id = result["stream_id"]
                devices = result["devices"]
                yield self.store.update_remote_device_list_cache(
                    user_id, devices, stream_id,
                )
                device_ids = [device["device_id"] for device in devices]
                yield self.notify_device_update(user_id, device_ids)
            else:
                # Simply update the single device, since we know that is the only
                # change (because of the single prev_id matching the current cache)
                content = dict(edu_content)
                for key in ("user_id", "device_id", "stream_id", "prev_ids"):
                    content.pop(key, None)
                yield self.store.update_remote_device_list_cache_entry(
                    user_id, device_id, content, stream_id,
                )
                yield self.notify_device_update(user_id, [device_id])

    @defer.inlineCallbacks
    def on_federation_query_user_devices(self, user_id):
        stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
        defer.returnValue({
            "user_id": user_id,
            "stream_id": stream_id,
            "devices": devices,
        })

    @defer.inlineCallbacks
    def user_left_room(self, user, room_id):
        user_id = user.to_string()
        rooms = yield self.store.get_rooms_for_user(user_id)
        if not rooms:
            # We no longer share rooms with this user, so we'll no longer
            # receive device updates. Mark this in DB.
            yield self.store.mark_remote_user_device_list_as_unsubscribed(user_id)


def _update_device_from_client_ips(device, client_ips):
    ip = client_ips.get((device["user_id"], device["device_id"]), {})
    device.update({
        "last_seen_ts": ip.get("last_seen"),
        "last_seen_ip": ip.get("ip"),
    })
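
# For reference, the shape of the m.device_list_update EDU consumed by
# _incoming_device_list_update above (field values illustrative; any keys
# other than the four popped ones are treated as the device's new content):
#
#   edu_content = {
#       "user_id": "@alice:remote.example",
#       "device_id": "ABCDEFGHIJ",
#       "stream_id": 6,
#       "prev_id": [5],   # matches our cached extremity -> no full resync
#   }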

synapse/handlers/devicemessage.py
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.types import get_domain_from_id
from synapse.util.stringutils import random_string


logger = logging.getLogger(__name__)


class DeviceMessageHandler(object):

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.federation = hs.get_federation_sender()

        hs.get_replication_layer().register_edu_handler(
            "m.direct_to_device", self.on_direct_to_device_edu
        )

    @defer.inlineCallbacks
    def on_direct_to_device_edu(self, origin, content):
        local_messages = {}
        sender_user_id = content["sender"]
        if origin != get_domain_from_id(sender_user_id):
            logger.warn(
                "Dropping device message from %r with spoofed sender %r",
                origin, sender_user_id
            )
            return  # actually drop the spoofed message, as the log says
        message_type = content["type"]
        message_id = content["message_id"]
        for user_id, by_device in content["messages"].items():
            messages_by_device = {
                device_id: {
                    "content": message_content,
                    "type": message_type,
                    "sender": sender_user_id,
                }
                for device_id, message_content in by_device.items()
            }
            if messages_by_device:
                local_messages[user_id] = messages_by_device

        stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
            origin, message_id, local_messages
        )

        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )

    @defer.inlineCallbacks
    def send_device_message(self, sender_user_id, message_type, messages):

        local_messages = {}
        remote_messages = {}
        for user_id, by_device in messages.items():
            if self.is_mine_id(user_id):
                messages_by_device = {
                    device_id: {
                        "content": message_content,
                        "type": message_type,
                        "sender": sender_user_id,
                    }
                    for device_id, message_content in by_device.items()
                }
                if messages_by_device:
                    local_messages[user_id] = messages_by_device
            else:
                destination = get_domain_from_id(user_id)
                remote_messages.setdefault(destination, {})[user_id] = by_device

        message_id = random_string(16)

        remote_edu_contents = {}
        for destination, messages in remote_messages.items():
            remote_edu_contents[destination] = {
                "messages": messages,
                "sender": sender_user_id,
                "type": message_type,
                "message_id": message_id,
            }

        stream_id = yield self.store.add_messages_to_device_inbox(
            local_messages, remote_edu_contents
        )

        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )

        for destination in remote_messages.keys():
            # Enqueue a new federation transaction to send the new
            # device messages to each remote destination.
            self.federation.send_device_messages(destination)
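
    # A sketch of the nested `messages` argument accepted by
    # send_device_message above (IDs and content illustrative):
    #
    #   messages = {
    #       "@bob:example.com": {            # local: written to device inbox
    #           "DEVICE1": {"ciphertext": "..."},
    #       },
    #       "@carol:remote.example": {       # remote: batched into a single
    #           "DEVICE9": {"ciphertext": "..."},  # m.direct_to_device EDU
    #       },                                     # per destination server
    #   }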

@@ -19,7 +19,7 @@ from ._base import BaseHandler

from synapse.api.errors import SynapseError, Codes, CodeMessageException, AuthError
from synapse.api.constants import EventTypes
from synapse.types import RoomAlias, UserID, get_domain_from_id

import logging
import string
@@ -55,7 +55,8 @@ class DirectoryHandler(BaseHandler):
        # TODO(erikj): Add transactions.
        # TODO(erikj): Check if there is a current association.
        if not servers:
            users = yield self.state.get_current_user_in_room(room_id)
            servers = set(get_domain_from_id(u) for u in users)

        if not servers:
            raise SynapseError(400, "Failed to get server list")
@@ -193,7 +194,8 @@ class DirectoryHandler(BaseHandler):
                Codes.NOT_FOUND
            )

        users = yield self.state.get_current_user_in_room(room_id)
        extra_servers = set(get_domain_from_id(u) for u in users)
        servers = set(extra_servers) | set(servers)

        # If this server is in the list of servers, return it first.
@@ -286,13 +288,12 @@ class DirectoryHandler(BaseHandler):
        result = yield as_handler.query_room_alias_exists(room_alias)
        defer.returnValue(result)

    def can_modify_alias(self, alias, user_id=None):
        # Any application service "interested" in an alias they are regexing on
        # can modify the alias.
        # Users can only modify the alias if ALL the interested services have
        # non-exclusive locks on the alias (or there are no interested services)
        services = self.store.get_app_services()
        interested_services = [
            s for s in services if s.is_interested_in_alias(alias.to_string())
        ]
@@ -300,14 +301,12 @@ class DirectoryHandler(BaseHandler):
        for service in interested_services:
            if user_id == service.sender:
                # this user IS the app service so they can do whatever they like
                return defer.succeed(True)
            elif service.is_exclusive_alias(alias.to_string()):
                # another service has an exclusive lock on this alias.
                return defer.succeed(False)
        # either no interested services, or no service with an exclusive lock
        return defer.succeed(True)

    @defer.inlineCallbacks
    def _user_can_delete_alias(self, alias, user_id):
@@ -340,3 +339,22 @@ class DirectoryHandler(BaseHandler):
        yield self.auth.check_can_change_room_list(room_id, requester.user)

        yield self.store.set_room_is_public(room_id, visibility == "public")

    @defer.inlineCallbacks
    def edit_published_appservice_room_list(self, appservice_id, network_id,
                                            room_id, visibility):
        """Add or remove a room from the appservice/network specific public
        room list.

        Args:
            appservice_id (str): ID of the appservice that owns the list
            network_id (str): The ID of the network the list is associated with
            room_id (str)
            visibility (str): either "public" or "private"
        """
        if visibility not in ["public", "private"]:
            raise SynapseError(400, "Invalid visibility setting")

        yield self.store.set_room_is_public_appservice(
            room_id, appservice_id, network_id, visibility == "public"
        )

synapse/handlers/e2e_keys.py
@@ -0,0 +1,323 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ujson as json
import logging

from canonicaljson import encode_canonical_json
from twisted.internet import defer

from synapse.api.errors import SynapseError, CodeMessageException
from synapse.types import get_domain_from_id
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

logger = logging.getLogger(__name__)


class E2eKeysHandler(object):
    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.federation = hs.get_replication_layer()
        self.device_handler = hs.get_device_handler()
        self.is_mine_id = hs.is_mine_id
        self.clock = hs.get_clock()

        # doesn't really work as part of the generic query API, because the
        # query request requires an object POST, but we abuse the
        # "query handler" interface.
        self.federation.register_query_handler(
            "client_keys", self.on_federation_query_client_keys
        )

    @defer.inlineCallbacks
    def query_devices(self, query_body, timeout):
        """ Handle a device key query from a client

        {
            "device_keys": {
                "<user_id>": ["<device_id>"]
            }
        }
        ->
        {
            "device_keys": {
                "<user_id>": {
                    "<device_id>": {
                        ...
                    }
                }
            }
        }
        """
        device_keys_query = query_body.get("device_keys", {})

        # separate users by domain.
        # make a map from domain to user_id to device_ids
        local_query = {}
        remote_queries = {}

        for user_id, device_ids in device_keys_query.items():
            if self.is_mine_id(user_id):
                local_query[user_id] = device_ids
            else:
                remote_queries[user_id] = device_ids

        # First get local devices.
        failures = {}
        results = {}
        if local_query:
            local_result = yield self.query_local_devices(local_query)
            for user_id, keys in local_result.items():
                if user_id in local_query:
                    results[user_id] = keys

        # Now attempt to get any remote devices from our local cache.
        remote_queries_not_in_cache = {}
        if remote_queries:
            query_list = []
            for user_id, device_ids in remote_queries.iteritems():
                if device_ids:
                    query_list.extend((user_id, device_id) for device_id in device_ids)
                else:
                    query_list.append((user_id, None))

            user_ids_not_in_cache, remote_results = (
                yield self.store.get_user_devices_from_cache(
                    query_list
                )
            )
            for user_id, devices in remote_results.iteritems():
                user_devices = results.setdefault(user_id, {})
                for device_id, device in devices.iteritems():
                    keys = device.get("keys", None)
                    device_display_name = device.get("device_display_name", None)
                    if keys:
                        result = dict(keys)
                        unsigned = result.setdefault("unsigned", {})
                        if device_display_name:
                            unsigned["device_display_name"] = device_display_name
                        user_devices[device_id] = result

            for user_id in user_ids_not_in_cache:
                domain = get_domain_from_id(user_id)
                r = remote_queries_not_in_cache.setdefault(domain, {})
                r[user_id] = remote_queries[user_id]

        # Now fetch any devices that we don't have in our cache
        @defer.inlineCallbacks
        def do_remote_query(destination):
            destination_query = remote_queries_not_in_cache[destination]
            try:
                limiter = yield get_retry_limiter(
                    destination, self.clock, self.store
                )
                with limiter:
                    remote_result = yield self.federation.query_client_keys(
                        destination,
                        {"device_keys": destination_query},
                        timeout=timeout
                    )

                for user_id, keys in remote_result["device_keys"].items():
                    if user_id in destination_query:
                        results[user_id] = keys

            except CodeMessageException as e:
                failures[destination] = {
                    "status": e.code, "message": e.message
                }
            except NotRetryingDestination as e:
                failures[destination] = {
                    "status": 503, "message": "Not ready for retry",
                }
            except Exception as e:
                # include ConnectionRefused and other errors
                failures[destination] = {
                    "status": 503, "message": e.message
                }

        yield preserve_context_over_deferred(defer.gatherResults([
            preserve_fn(do_remote_query)(destination)
            for destination in remote_queries_not_in_cache
        ]))

        defer.returnValue({
            "device_keys": results, "failures": failures,
        })
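
    # An illustrative query body for query_devices above; an empty device
    # list means "all devices for that user":
    #
    #   query_body = {
    #       "device_keys": {
    #           "@alice:example.com": [],            # local: all devices
    #           "@bob:remote.example": ["DEVICE1"],  # remote: one device
    #       }
    #   }
    #   # result["device_keys"]["@bob:remote.example"]["DEVICE1"] -> key dict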

    @defer.inlineCallbacks
    def query_local_devices(self, query):
        """Get E2E device keys for local users

        Args:
            query (dict[string, list[string]|None]): map from user_id to a list
                of devices to query (None for all devices)

        Returns:
            defer.Deferred: (resolves to dict[string, dict[string, dict]]):
                map from user_id -> device_id -> device details
        """
        local_query = []

        result_dict = {}
        for user_id, device_ids in query.items():
            if not self.is_mine_id(user_id):
                logger.warning("Request for keys for non-local user %s",
                               user_id)
                raise SynapseError(400, "Not a user here")

            if not device_ids:
                local_query.append((user_id, None))
            else:
                for device_id in device_ids:
                    local_query.append((user_id, device_id))

            # make sure that each queried user appears in the result dict
            result_dict[user_id] = {}

        results = yield self.store.get_e2e_device_keys(local_query)

        # Build the result structure, un-jsonify the results, and add the
        # "unsigned" section
        for user_id, device_keys in results.items():
            for device_id, device_info in device_keys.items():
                r = dict(device_info["keys"])
                r["unsigned"] = {}
                display_name = device_info["device_display_name"]
                if display_name is not None:
                    r["unsigned"]["device_display_name"] = display_name
                result_dict[user_id][device_id] = r

        defer.returnValue(result_dict)

    @defer.inlineCallbacks
    def on_federation_query_client_keys(self, query_body):
        """ Handle a device key query from a federated server
        """
        device_keys_query = query_body.get("device_keys", {})
        res = yield self.query_local_devices(device_keys_query)
        defer.returnValue({"device_keys": res})

    @defer.inlineCallbacks
    def claim_one_time_keys(self, query, timeout):
        local_query = []
        remote_queries = {}

        for user_id, device_keys in query.get("one_time_keys", {}).items():
            if self.is_mine_id(user_id):
                for device_id, algorithm in device_keys.items():
                    local_query.append((user_id, device_id, algorithm))
            else:
                domain = get_domain_from_id(user_id)
                remote_queries.setdefault(domain, {})[user_id] = device_keys

        results = yield self.store.claim_e2e_one_time_keys(local_query)

        json_result = {}
        failures = {}
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        @defer.inlineCallbacks
        def claim_client_keys(destination):
            device_keys = remote_queries[destination]
            try:
                limiter = yield get_retry_limiter(
                    destination, self.clock, self.store
                )
                with limiter:
                    remote_result = yield self.federation.claim_client_keys(
                        destination,
                        {"one_time_keys": device_keys},
                        timeout=timeout
                    )
                    for user_id, keys in remote_result["one_time_keys"].items():
                        if user_id in device_keys:
                            json_result[user_id] = keys
            except CodeMessageException as e:
                failures[destination] = {
                    "status": e.code, "message": e.message
                }
            except NotRetryingDestination as e:
                failures[destination] = {
                    "status": 503, "message": "Not ready for retry",
                }
            except Exception as e:
                # include ConnectionRefused and other errors
                failures[destination] = {
                    "status": 503, "message": e.message
                }

        yield preserve_context_over_deferred(defer.gatherResults([
            preserve_fn(claim_client_keys)(destination)
            for destination in remote_queries
        ]))

        defer.returnValue({
            "one_time_keys": json_result,
            "failures": failures
        })
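
    # An illustrative claim body for claim_one_time_keys above; the inner
    # value names the algorithm of the key to claim for each device, and
    # returned key ids take the form "<algorithm>:<key_id>":
    #
    #   query = {
    #       "one_time_keys": {
    #           "@alice:example.com": {"DEVICE1": "signed_curve25519"},
    #       }
    #   }
    #   # result["one_time_keys"]["@alice:example.com"]["DEVICE1"]
    #   #     -> {"signed_curve25519:AAAAHQ": {...key data...}}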

    @defer.inlineCallbacks
    def upload_keys_for_user(self, user_id, device_id, keys):
        time_now = self.clock.time_msec()

        # TODO: Validate the JSON to make sure it has the right keys.
        device_keys = keys.get("device_keys", None)
        if device_keys:
            logger.info(
                "Updating device_keys for device %r for user %s at %d",
                device_id, user_id, time_now
            )
            # TODO: Sign the JSON with the server key
            changed = yield self.store.set_e2e_device_keys(
                user_id, device_id, time_now, device_keys,
            )
            if changed:
                # Only notify about device updates *if* the keys actually changed
                yield self.device_handler.notify_device_update(user_id, [device_id])

        one_time_keys = keys.get("one_time_keys", None)
        if one_time_keys:
            logger.info(
                "Adding %d one_time_keys for device %r for user %r at %d",
                len(one_time_keys), device_id, user_id, time_now
            )
            key_list = []
            for key_id, key_json in one_time_keys.items():
                algorithm, key_id = key_id.split(":")
                key_list.append((
                    algorithm, key_id, encode_canonical_json(key_json)
                ))

            yield self.store.add_e2e_one_time_keys(
                user_id, device_id, time_now, key_list
            )

        # the device should have been registered already, but it may have been
        # deleted due to a race with a DELETE request. Or we may be using an
        # old access_token without an associated device_id. Either way, we
        # need to double-check the device is registered to avoid ending up with
        # keys without a corresponding device.
        self.device_handler.check_device_registered(user_id, device_id)

        result = yield self.store.count_e2e_one_time_keys(user_id, device_id)

        defer.returnValue({"one_time_key_counts": result})
|
||||
@@ -47,6 +47,7 @@ class EventStreamHandler(BaseHandler):
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
self.notifier = hs.get_notifier()
|
||||
self.state = hs.get_state_handler()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
@@ -90,7 +91,7 @@ class EventStreamHandler(BaseHandler):
|
||||
# Send down presence.
|
||||
if event.state_key == auth_user_id:
|
||||
# Send down presence for everyone in the room.
|
||||
users = yield self.store.get_users_in_room(event.room_id)
|
||||
users = yield self.state.get_current_user_in_room(event.room_id)
|
||||
states = yield presence_handler.get_states(
|
||||
users,
|
||||
as_event=True,
|
||||
|
||||
@@ -26,7 +26,10 @@ from synapse.api.errors import (
|
||||
from synapse.api.constants import EventTypes, Membership, RejectedReason
|
||||
from synapse.events.validator import EventValidator
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
|
||||
from synapse.util.logcontext import (
|
||||
PreserveLoggingContext, preserve_fn, preserve_context_over_deferred
|
||||
)
|
||||
from synapse.util.metrics import measure_func
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.util.async import run_on_reactor
|
||||
from synapse.util.frozenutils import unfreeze
|
||||
@@ -77,27 +80,14 @@ class FederationHandler(BaseHandler):
|
||||
# When joining a room we need to queue any events for that room up
|
||||
self.room_queues = {}
|
||||
|
||||
def handle_new_event(self, event, destinations):
|
||||
""" Takes in an event from the client to server side, that has already
|
||||
been authed and handled by the state module, and sends it to any
|
||||
remote home servers that may be interested.
|
||||
|
||||
Args:
|
||||
event: The event to send
|
||||
destinations: A list of destinations to send it to
|
||||
|
||||
Returns:
|
||||
Deferred: Resolved when it has successfully been queued for
|
||||
processing.
|
||||
"""
|
||||
|
||||
return self.replication_layer.send_pdu(event, destinations)
|
||||
|
||||
@log_function
|
||||
@defer.inlineCallbacks
|
||||
def on_receive_pdu(self, origin, pdu, state=None, auth_chain=None):
|
||||
""" Called by the ReplicationLayer when we have a new pdu. We need to
|
||||
do auth checks and put it through the StateHandler.
|
||||
|
||||
auth_chain and state are None if we already have the necessary state
|
||||
and prev_events in the db
|
||||
"""
|
||||
event = pdu
|
||||
|
||||
@@ -115,16 +105,25 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
# FIXME (erikj): Awful hack to make the case where we are not currently
|
||||
# in the room work
|
||||
is_in_room = yield self.auth.check_host_in_room(
|
||||
event.room_id,
|
||||
self.server_name
|
||||
)
|
||||
if not is_in_room and not event.internal_metadata.is_outlier():
|
||||
logger.debug("Got event for room we're not in.")
|
||||
# If state and auth_chain are None, then we don't need to do this check
|
||||
# as we already know we have enough state in the DB to handle this
|
||||
# event.
|
||||
if state and auth_chain and not event.internal_metadata.is_outlier():
|
||||
is_in_room = yield self.auth.check_host_in_room(
|
||||
event.room_id,
|
||||
self.server_name
|
||||
)
|
||||
else:
|
||||
is_in_room = True
|
||||
if not is_in_room:
|
||||
logger.info(
|
||||
"Got event for room we're not in: %r %r",
|
||||
event.room_id, event.event_id
|
||||
)
|
||||
|
||||
try:
|
||||
event_stream_id, max_stream_id = yield self._persist_auth_tree(
|
||||
auth_chain, state, event
|
||||
origin, auth_chain, state, event
|
||||
)
|
||||
except AuthError as e:
|
||||
raise FederationError(
|
||||
@@ -215,17 +214,28 @@ class FederationHandler(BaseHandler):
|
||||
|
||||
if event.type == EventTypes.Member:
|
||||
if event.membership == Membership.JOIN:
|
||||
prev_state = context.current_state.get((event.type, event.state_key))
|
||||
if not prev_state or prev_state.membership != Membership.JOIN:
|
||||
# Only fire user_joined_room if the user has acutally
|
||||
# joined the room. Don't bother if the user is just
|
||||
# changing their profile info.
|
||||
# Only fire user_joined_room if the user has acutally
|
||||
# joined the room. Don't bother if the user is just
|
||||
# changing their profile info.
|
||||
newly_joined = True
|
||||
prev_state_id = context.prev_state_ids.get(
|
||||
(event.type, event.state_key)
|
||||
)
|
||||
if prev_state_id:
|
||||
prev_state = yield self.store.get_event(
|
||||
prev_state_id, allow_none=True,
|
||||
)
|
||||
if prev_state and prev_state.membership == Membership.JOIN:
|
||||
newly_joined = False
|
||||
|
||||
if newly_joined:
|
||||
user = UserID.from_string(event.state_key)
|
||||
yield user_joined_room(self.distributor, user, event.room_id)
|
||||
|
||||
@measure_func("_filter_events_for_server")
|
||||
@defer.inlineCallbacks
|
||||
def _filter_events_for_server(self, server_name, room_id, events):
|
||||
event_to_state = yield self.store.get_state_for_events(
|
||||
event_to_state_ids = yield self.store.get_state_ids_for_events(
|
||||
frozenset(e.event_id for e in events),
|
||||
types=(
|
||||
(EventTypes.RoomHistoryVisibility, ""),
|
||||
@@ -233,6 +243,33 @@ class FederationHandler(BaseHandler):
|
||||
)
|
||||
)
|
||||
|
||||
# We only want to pull out member events that correspond to the
|
||||
# server's domain.
|
||||
|
||||
def check_match(id):
|
||||
try:
|
||||
return server_name == get_domain_from_id(id)
|
||||
except:
|
||||
return False
|
||||
|
||||
# Parses mapping `event_id -> (type, state_key) -> state event_id`
|
||||
# to get all state ids that we're interested in.
|
||||
event_map = yield self.store.get_events([
|
||||
e_id
|
||||
for key_to_eid in event_to_state_ids.values()
|
||||
for key, e_id in key_to_eid.items()
|
||||
if key[0] != EventTypes.Member or check_match(key[1])
|
||||
])
|
||||
|
||||
event_to_state = {
|
||||
e_id: {
|
||||
key: event_map[inner_e_id]
|
||||
for key, inner_e_id in key_to_eid.items()
|
||||
if inner_e_id in event_map
|
||||
}
|
||||
for e_id, key_to_eid in event_to_state_ids.items()
|
||||
}
|
||||
|
||||
def redact_disallowed(event, state):
|
||||
if not state:
|
||||
return event
|
||||
@@ -249,7 +286,7 @@ class FederationHandler(BaseHandler):
|
||||
if ev.type != EventTypes.Member:
|
||||
continue
|
||||
try:
|
||||
domain = UserID.from_string(ev.state_key).domain
domain = get_domain_from_id(ev.state_key)
except:
continue

@@ -274,7 +311,7 @@ class FederationHandler(BaseHandler):

@log_function
@defer.inlineCallbacks
def backfill(self, dest, room_id, limit, extremities=[]):
def backfill(self, dest, room_id, limit, extremities):
""" Trigger a backfill request to `dest` for the given `room_id`

This will attempt to get more events from the remote. This may return
@@ -284,9 +321,6 @@ class FederationHandler(BaseHandler):
if dest == self.server_name:
raise SynapseError(400, "Can't backfill from self.")

if not extremities:
extremities = yield self.store.get_oldest_events_in_room(room_id)

events = yield self.replication_layer.backfill(
dest,
room_id,
@@ -335,32 +369,61 @@ class FederationHandler(BaseHandler):
state_events.update({s.event_id: s for s in state})
events_to_state[e_id] = state

required_auth = set(
a_id
for event in events + state_events.values() + auth_events.values()
for a_id, _ in event.auth_events
)
auth_events.update({
e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
})
missing_auth = required_auth - set(auth_events)
failed_to_fetch = set()

# Try and fetch any missing auth events from both DB and remote servers.
# We repeatedly do this until we stop finding new auth events.
while missing_auth - failed_to_fetch:
logger.info("Missing auth for backfill: %r", missing_auth)
ret_events = yield self.store.get_events(missing_auth - failed_to_fetch)
auth_events.update(ret_events)

required_auth.update(
a_id for event in ret_events.values() for a_id, _ in event.auth_events
)
missing_auth = required_auth - set(auth_events)

if missing_auth - failed_to_fetch:
logger.info(
"Fetching missing auth for backfill: %r",
missing_auth - failed_to_fetch
)

results = yield preserve_context_over_deferred(defer.gatherResults(
[
preserve_fn(self.replication_layer.get_pdu)(
[dest],
event_id,
outlier=True,
timeout=10000,
)
for event_id in missing_auth - failed_to_fetch
],
consumeErrors=True
)).addErrback(unwrapFirstError)
auth_events.update({a.event_id: a for a in results if a})
required_auth.update(
a_id
for event in results if event
for a_id, _ in event.auth_events
)
missing_auth = required_auth - set(auth_events)

failed_to_fetch = missing_auth - set(auth_events)

seen_events = yield self.store.have_events(
set(auth_events.keys()) | set(state_events.keys())
)

all_events = events + state_events.values() + auth_events.values()
required_auth = set(
a_id for event in all_events for a_id, _ in event.auth_events
)

missing_auth = required_auth - set(auth_events)
if missing_auth:
logger.info("Missing auth for backfill: %r", missing_auth)
results = yield defer.gatherResults(
[
self.replication_layer.get_pdu(
[dest],
event_id,
outlier=True,
timeout=10000,
)
for event_id in missing_auth
],
consumeErrors=True
).addErrback(unwrapFirstError)
auth_events.update({a.event_id: a for a in results})
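The repeated pattern in the new backfill code above -- wrap each call in preserve_fn() and the combined Deferred in preserve_context_over_deferred() -- keeps Twisted log contexts intact across parallel fetches. A minimal sketch of that pattern (fetch_all, fetch_one and ids are illustrative stand-ins, not names from the diff):

from twisted.internet import defer

from synapse.util import unwrapFirstError
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred


@defer.inlineCallbacks
def fetch_all(fetch_one, ids):
    # Run the fetches concurrently without losing the caller's log context;
    # consumeErrors + unwrapFirstError re-raises the first real failure.
    results = yield preserve_context_over_deferred(defer.gatherResults(
        [preserve_fn(fetch_one)(i) for i in ids],
        consumeErrors=True,
    )).addErrback(unwrapFirstError)
    defer.returnValue([r for r in results if r])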
ev_infos = []
for a in auth_events.values():
if a.event_id in seen_events:
@@ -372,6 +435,7 @@ class FederationHandler(BaseHandler):
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
for a_id, _ in a.auth_events
if a_id in auth_events
}
})

@@ -383,6 +447,7 @@ class FederationHandler(BaseHandler):
(auth_events[a_id].type, auth_events[a_id].state_key):
auth_events[a_id]
for a_id, _ in event_map[e_id].auth_events
if a_id in auth_events
}
})

@@ -426,6 +491,10 @@ class FederationHandler(BaseHandler):
)
max_depth = sorted_extremeties_tuple[0][1]

# We don't want to specify too many extremities as it causes the backfill
# request URI to be too long.
extremities = dict(sorted_extremeties_tuple[:5])

if current_depth > max_depth:
logger.debug(
"Not backfilling as we don't need to. %d < %d",
@@ -522,11 +591,24 @@ class FederationHandler(BaseHandler):

event_ids = list(extremities.keys())

states = yield defer.gatherResults([
self.state_handler.resolve_state_groups(room_id, [e])
logger.debug("calling resolve_state_groups in _maybe_backfill")
states = yield preserve_context_over_deferred(defer.gatherResults([
preserve_fn(self.state_handler.resolve_state_groups)(room_id, [e])
for e in event_ids
])
states = dict(zip(event_ids, [s[1] for s in states]))
]))
states = dict(zip(event_ids, [s.state for s in states]))

state_map = yield self.store.get_events(
[e_id for ids in states.values() for e_id in ids],
get_prev_content=False
)
states = {
key: {
k: state_map[e_id]
for k, e_id in state_dict.items()
if e_id in state_map
} for key, state_dict in states.items()
}

for e_id, _ in sorted_extremeties_tuple:
likely_domains = get_domains_from_state(states[e_id])
@@ -637,7 +719,7 @@ class FederationHandler(BaseHandler):
pass

event_stream_id, max_stream_id = yield self._persist_auth_tree(
auth_chain, state, event
origin, auth_chain, state, event
)

with PreserveLoggingContext():
@@ -688,7 +770,9 @@ class FederationHandler(BaseHandler):
logger.warn("Failed to create join %r because %s", event, e)
raise e

self.auth.check(event, auth_events=context.current_state)
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
yield self.auth.check_from_context(event, context, do_sig_check=False)

defer.returnValue(event)

@@ -707,6 +791,10 @@ class FederationHandler(BaseHandler):
)

event.internal_metadata.outlier = False
# Send this event on behalf of the origin server since they may not
# have an up to date view of the state of the room at this event so
# will not know which servers to send the event to.
event.internal_metadata.send_on_behalf_of = origin

context, event_stream_id, max_stream_id = yield self._handle_new_event(
origin, event
@@ -734,37 +822,15 @@ class FederationHandler(BaseHandler):
user = UserID.from_string(event.state_key)
yield user_joined_room(self.distributor, user, event.room_id)

new_pdu = event

destinations = set()

for k, s in context.current_state.items():
try:
if k[0] == EventTypes.Member:
if s.content["membership"] == Membership.JOIN:
destinations.add(get_domain_from_id(s.state_key))
except:
logger.warn(
"Failed to get destination from event %s", s.event_id
)

destinations.discard(origin)

logger.debug(
"on_send_join_request: Sending event: %s, signatures: %s",
event.event_id,
event.signatures,
)

self.replication_layer.send_pdu(new_pdu, destinations)

state_ids = [e.event_id for e in context.current_state.values()]
state_ids = context.prev_state_ids.values()
auth_chain = yield self.store.get_auth_chain(set(
[event.event_id] + state_ids
))

state = yield self.store.get_events(context.prev_state_ids.values())

defer.returnValue({
"state": context.current_state.values(),
"state": state.values(),
"auth_chain": auth_chain,
})

@@ -918,7 +984,9 @@ class FederationHandler(BaseHandler):
)

try:
self.auth.check(event, auth_events=context.current_state)
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
yield self.auth.check_from_context(event, context, do_sig_check=False)
except AuthError as e:
logger.warn("Failed to create new leave %r because %s", event, e)
raise e
@@ -960,41 +1028,14 @@ class FederationHandler(BaseHandler):
event, event_stream_id, max_stream_id, extra_users=extra_users
)

new_pdu = event

destinations = set()

for k, s in context.current_state.items():
try:
if k[0] == EventTypes.Member:
if s.content["membership"] == Membership.LEAVE:
destinations.add(get_domain_from_id(s.state_key))
except:
logger.warn(
"Failed to get destination from event %s", s.event_id
)

destinations.discard(origin)

logger.debug(
"on_send_leave_request: Sending event: %s, signatures: %s",
event.event_id,
event.signatures,
)

self.replication_layer.send_pdu(new_pdu, destinations)

defer.returnValue(None)

@defer.inlineCallbacks
def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True):
def get_state_for_pdu(self, room_id, event_id):
"""Returns the state at the event. i.e. not including said event.
"""
yield run_on_reactor()

if do_auth:
in_room = yield self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")

state_groups = yield self.store.get_state_groups(
room_id, [event_id]
)
@@ -1033,6 +1074,34 @@ class FederationHandler(BaseHandler):
else:
defer.returnValue([])

@defer.inlineCallbacks
def get_state_ids_for_pdu(self, room_id, event_id):
"""Returns the state at the event. i.e. not including said event.
"""
yield run_on_reactor()

state_groups = yield self.store.get_state_groups_ids(
room_id, [event_id]
)

if state_groups:
_, state = state_groups.items().pop()
results = state

event = yield self.store.get_event(event_id)
if event and event.is_state():
# Get previous state
if "replaces_state" in event.unsigned:
prev_id = event.unsigned["replaces_state"]
if prev_id != event.event_id:
results[(event.type, event.state_key)] = prev_id
else:
del results[(event.type, event.state_key)]

defer.returnValue(results.values())
else:
defer.returnValue([])

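get_state_ids_for_pdu returns the state *before* the event: if the event is itself a state event it must be swapped for whatever it replaced. A standalone sketch of that adjustment (state_before_event is a hypothetical helper name, not in the diff):

def state_before_event(results, event):
    # results: dict of (type, state_key) -> event_id, currently including `event`
    if "replaces_state" in event.unsigned:
        prev_id = event.unsigned["replaces_state"]
        if prev_id != event.event_id:
            results[(event.type, event.state_key)] = prev_id
    else:
        # The event introduced this state key, so the key was unset before it.
        del results[(event.type, event.state_key)]
    return results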
@defer.inlineCallbacks
@log_function
def on_backfill_request(self, origin, room_id, pdu_list, limit):
@@ -1065,16 +1134,17 @@ class FederationHandler(BaseHandler):
)

if event:
# FIXME: This is a temporary work around where we occasionally
# return events slightly differently than when they were
# originally signed
event.signatures.update(
compute_event_signature(
event,
self.hs.hostname,
self.hs.config.signing_key[0]
if self.hs.is_mine_id(event.event_id):
# FIXME: This is a temporary work around where we occasionally
# return events slightly differently than when they were
# originally signed
event.signatures.update(
compute_event_signature(
event,
self.hs.hostname,
self.hs.config.signing_key[0]
)
)
)

if do_auth:
in_room = yield self.auth.check_host_in_room(
@@ -1084,6 +1154,12 @@ class FederationHandler(BaseHandler):
if not in_room:
raise AuthError(403, "Host not in room.")

events = yield self._filter_events_for_server(
origin, event.room_id, [event]
)

event = events[0]

defer.returnValue(event)
else:
defer.returnValue(None)
@@ -1114,11 +1190,12 @@ class FederationHandler(BaseHandler):
backfilled=backfilled,
)

# this intentionally does not yield: we don't care about the result
# and don't need to wait for it.
preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
event_stream_id, max_stream_id
)
if not backfilled:
# this intentionally does not yield: we don't care about the result
# and don't need to wait for it.
preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
event_stream_id, max_stream_id
)

defer.returnValue((context, event_stream_id, max_stream_id))

@@ -1129,9 +1206,9 @@ class FederationHandler(BaseHandler):
a bunch of outliers, but not a chunk of individual events that depend
on each other for state calculations.
"""
contexts = yield defer.gatherResults(
contexts = yield preserve_context_over_deferred(defer.gatherResults(
[
self._prep_event(
preserve_fn(self._prep_event)(
origin,
ev_info["event"],
state=ev_info.get("state"),
@@ -1139,7 +1216,7 @@ class FederationHandler(BaseHandler):
)
for ev_info in event_infos
]
)
))

yield self.store.persist_events(
[
@@ -1150,11 +1227,19 @@ class FederationHandler(BaseHandler):
)

@defer.inlineCallbacks
def _persist_auth_tree(self, auth_events, state, event):
def _persist_auth_tree(self, origin, auth_events, state, event):
"""Checks the auth chain is valid (and passes auth checks) for the
state and event. Then persists the auth chain and state atomically.
Persists the event separately.

Will attempt to fetch missing auth events.

Args:
origin (str): Where the events came from
auth_events (list)
state (list)
event (Event)

Returns:
2-tuple of (event_stream_id, max_stream_id) from the persist_event
call for `event`
@@ -1167,7 +1252,7 @@ class FederationHandler(BaseHandler):

event_map = {
e.event_id: e
for e in auth_events
for e in itertools.chain(auth_events, state, [event])
}

create_event = None
@@ -1176,10 +1261,29 @@ class FederationHandler(BaseHandler):
create_event = e
break

missing_auth_events = set()
for e in itertools.chain(auth_events, state, [event]):
for e_id, _ in e.auth_events:
if e_id not in event_map:
missing_auth_events.add(e_id)

for e_id in missing_auth_events:
m_ev = yield self.replication_layer.get_pdu(
[origin],
e_id,
outlier=True,
timeout=10000,
)
if m_ev and m_ev.event_id == e_id:
event_map[e_id] = m_ev
else:
logger.info("Failed to find auth event %r", e_id)

for e in itertools.chain(auth_events, state, [event]):
auth_for_e = {
(event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
for e_id, _ in e.auth_events
if e_id in event_map
}
if create_event:
auth_for_e[(EventTypes.Create, "")] = create_event
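Auth checks throughout this file want events keyed by (type, state_key). A sketch of the map construction the loop above performs per event (auth_map_for is a hypothetical name; event_map, create_event and EventTypes are from the surrounding diff):

def auth_map_for(e, event_map, create_event=None):
    auth_for_e = {
        (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
        for e_id, _ in e.auth_events
        if e_id in event_map
    }
    if create_event:
        # Some old rooms omit the create event from auth_events; patch it in.
        auth_for_e[(EventTypes.Create, "")] = create_event
    return auth_for_e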
@@ -1215,7 +1319,6 @@ class FederationHandler(BaseHandler):

event_stream_id, max_stream_id = yield self.store.persist_event(
event, new_event_context,
current_state=state,
)

defer.returnValue((event_stream_id, max_stream_id))
@@ -1228,7 +1331,13 @@ class FederationHandler(BaseHandler):
)

if not auth_events:
auth_events = context.current_state
auth_events_ids = yield self.auth.compute_auth_events(
event, context.prev_state_ids, for_verification=True,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
(e.type, e.state_key): e for e in auth_events.values()
}

# This is a hack to fix some old rooms where the initial join event
# didn't reference the create event in its auth events.
@@ -1254,8 +1363,7 @@ class FederationHandler(BaseHandler):
context.rejected = RejectedReason.AUTH_ERROR

if event.type == EventTypes.GuestAccess:
full_context = yield self.store.get_current_state(room_id=event.room_id)
yield self.maybe_kick_guest_users(event, full_context)
yield self.maybe_kick_guest_users(event)

defer.returnValue(context)

@@ -1323,6 +1431,11 @@ class FederationHandler(BaseHandler):
current_state = set(e.event_id for e in auth_events.values())
event_auth_events = set(e_id for e_id, _ in event.auth_events)

if event.is_state():
event_key = (event.type, event.state_key)
else:
event_key = None

if event_auth_events - current_state:
have_events = yield self.store.have_events(
event_auth_events - current_state
@@ -1396,9 +1509,9 @@ class FederationHandler(BaseHandler):
# Do auth conflict res.
logger.info("Different auth: %s", different_auth)

different_events = yield defer.gatherResults(
different_events = yield preserve_context_over_deferred(defer.gatherResults(
[
self.store.get_event(
preserve_fn(self.store.get_event)(
d,
allow_none=True,
allow_rejected=False,
@@ -1407,7 +1520,7 @@ class FederationHandler(BaseHandler):
if d in have_events and not have_events[d]
],
consumeErrors=True
).addErrback(unwrapFirstError)
)).addErrback(unwrapFirstError)

if different_events:
local_view = dict(auth_events)
@@ -1416,7 +1529,7 @@ class FederationHandler(BaseHandler):
(d.type, d.state_key): d for d in different_events if d
})

new_state, prev_state = self.state_handler.resolve_events(
new_state = self.state_handler.resolve_events(
[local_view.values(), remote_view.values()],
event
)
@@ -1426,8 +1539,16 @@ class FederationHandler(BaseHandler):
current_state = set(e.event_id for e in auth_events.values())
different_auth = event_auth_events - current_state

context.current_state.update(auth_events)
context.state_group = None
context.current_state_ids = dict(context.current_state_ids)
context.current_state_ids.update({
k: a.event_id for k, a in auth_events.items()
if k != event_key
})
context.prev_state_ids = dict(context.prev_state_ids)
context.prev_state_ids.update({
k: a.event_id for k, a in auth_events.items()
})
context.state_group = self.store.get_next_state_group()

if different_auth and not event.internal_metadata.is_outlier():
logger.info("Different auth after resolution: %s", different_auth)
@@ -1448,8 +1569,8 @@ class FederationHandler(BaseHandler):

if do_resolution:
# 1. Get what we think is the auth chain.
auth_ids = self.auth.compute_auth_events(
event, context.current_state
auth_ids = yield self.auth.compute_auth_events(
event, context.prev_state_ids
)
local_auth_chain = yield self.store.get_auth_chain(auth_ids)

@@ -1505,8 +1626,16 @@ class FederationHandler(BaseHandler):
# 4. Look at rejects and their proofs.
# TODO.

context.current_state.update(auth_events)
context.state_group = None
context.current_state_ids = dict(context.current_state_ids)
context.current_state_ids.update({
k: a.event_id for k, a in auth_events.items()
if k != event_key
})
context.prev_state_ids = dict(context.prev_state_ids)
context.prev_state_ids.update({
k: a.event_id for k, a in auth_events.items()
})
context.state_group = self.store.get_next_state_group()

try:
self.auth.check(event, auth_events=auth_events)
@@ -1692,12 +1821,12 @@ class FederationHandler(BaseHandler):
)

try:
self.auth.check(event, context.current_state)
yield self.auth.check_from_context(event, context)
except AuthError as e:
logger.warn("Denying new third party invite %r because %s", event, e)
raise e

yield self._check_signature(event, auth_events=context.current_state)
yield self._check_signature(event, context)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.send_membership_event(None, event, context)
else:
@@ -1723,11 +1852,11 @@ class FederationHandler(BaseHandler):
)

try:
self.auth.check(event, auth_events=context.current_state)
self.auth.check_from_context(event, context)
except AuthError as e:
logger.warn("Denying third party invite %r because %s", event, e)
raise e
yield self._check_signature(event, auth_events=context.current_state)
yield self._check_signature(event, context)

returned_invite = yield self.send_invite(origin, event)
# TODO: Make sure the signatures actually are correct.
@@ -1741,16 +1870,24 @@ class FederationHandler(BaseHandler):
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"]
)
original_invite = context.current_state.get(key)
if not original_invite:
logger.info(
"Could not find invite event for third_party_invite - "
"discarding: %s" % (event_dict,)
original_invite = None
original_invite_id = context.prev_state_ids.get(key)
if original_invite_id:
original_invite = yield self.store.get_event(
original_invite_id, allow_none=True
)
return
if original_invite:
display_name = original_invite.content["display_name"]
event_dict["content"]["third_party_invite"]["display_name"] = display_name
else:
logger.info(
"Could not find invite event for third_party_invite: %r",
event_dict
)
# We don't discard here as this is not the appropriate place to do
# auth checks. If we need the invite and don't have it then the
# auth check code will explode appropriately.

display_name = original_invite.content["display_name"]
event_dict["content"]["third_party_invite"]["display_name"] = display_name
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
message_handler = self.hs.get_handlers().message_handler
@@ -1758,13 +1895,13 @@ class FederationHandler(BaseHandler):
defer.returnValue((event, context))

@defer.inlineCallbacks
def _check_signature(self, event, auth_events):
def _check_signature(self, event, context):
"""
Checks that the signature in the event is consistent with its invite.

Args:
event (Event): The m.room.member event to check
auth_events (dict<(event type, state_key), event>):
context (EventContext):

Raises:
AuthError: if signature didn't match any keys, or key has been
@@ -1775,10 +1912,14 @@ class FederationHandler(BaseHandler):
signed = event.content["third_party_invite"]["signed"]
token = signed["token"]

invite_event = auth_events.get(
invite_event_id = context.prev_state_ids.get(
(EventTypes.ThirdPartyInvite, token,)
)

invite_event = None
if invite_event_id:
invite_event = yield self.store.get_event(invite_event_id, allow_none=True)

if not invite_event:
raise AuthError(403, "Could not find invite")

synapse/handlers/initial_sync.py (new file, 444 lines)
@@ -0,0 +1,444 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.streams.config import PaginationConfig
from synapse.types import (
UserID, StreamToken,
)
from synapse.util import unwrapFirstError
from synapse.util.async import concurrently_execute
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
from synapse.visibility import filter_events_for_client

from ._base import BaseHandler

import logging


logger = logging.getLogger(__name__)


class InitialSyncHandler(BaseHandler):
def __init__(self, hs):
super(InitialSyncHandler, self).__init__(hs)
self.hs = hs
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()

def snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
"""Retrieve a snapshot of all rooms the user is invited or has joined.

This snapshot may include messages for all rooms where the user is
joined, depending on the pagination config.

Args:
user_id (str): The ID of the user making the request.
pagin_config (synapse.api.streams.PaginationConfig): The pagination
config used to determine how many messages *PER ROOM* to return.
as_client_event (bool): True to get events in client-server format.
include_archived (bool): True to get rooms that the user has left
Returns:
A list of dicts with "room_id" and "membership" keys for all rooms
the user is currently invited or joined in on. Rooms where the user
is joined on, may return a "messages" key with messages, depending
on the specified PaginationConfig.
"""
key = (
user_id,
pagin_config.from_token,
pagin_config.to_token,
pagin_config.direction,
pagin_config.limit,
as_client_event,
include_archived,
)
now_ms = self.clock.time_msec()
result = self.snapshot_cache.get(now_ms, key)
if result is not None:
return result

return self.snapshot_cache.set(now_ms, key, self._snapshot_all_rooms(
user_id, pagin_config, as_client_event, include_archived
))
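The snapshot cache above de-duplicates identical concurrent initial-sync requests: the full argument tuple is the key, and callers either join an in-flight computation or start one. A sketch of the pattern (cached_snapshot is a hypothetical wrapper; compute stands in for _snapshot_all_rooms):

def cached_snapshot(snapshot_cache, clock, key, compute):
    now_ms = clock.time_msec()
    result = snapshot_cache.get(now_ms, key)
    if result is not None:
        # Another request with the same key is in flight (or recent);
        # share its Deferred instead of recomputing.
        return result
    return snapshot_cache.set(now_ms, key, compute())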
@defer.inlineCallbacks
def _snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):

memberships = [Membership.INVITE, Membership.JOIN]
if include_archived:
memberships.append(Membership.LEAVE)

room_list = yield self.store.get_rooms_for_user_where_membership_is(
user_id=user_id, membership_list=memberships
)

user = UserID.from_string(user_id)

rooms_ret = []

now_token = yield self.hs.get_event_sources().get_current_token()

presence_stream = self.hs.get_event_sources().sources["presence"]
pagination_config = PaginationConfig(from_token=now_token)
presence, _ = yield presence_stream.get_pagination_rows(
user, pagination_config.get_source_config("presence"), None
)

receipt_stream = self.hs.get_event_sources().sources["receipt"]
receipt, _ = yield receipt_stream.get_pagination_rows(
user, pagination_config.get_source_config("receipt"), None
)

tags_by_room = yield self.store.get_tags_for_user(user_id)

account_data, account_data_by_room = (
yield self.store.get_account_data_for_user(user_id)
)

public_room_ids = yield self.store.get_public_room_ids()

limit = pagin_config.limit
if limit is None:
limit = 10

@defer.inlineCallbacks
def handle_room(event):
d = {
"room_id": event.room_id,
"membership": event.membership,
"visibility": (
"public" if event.room_id in public_room_ids
else "private"
),
}

if event.membership == Membership.INVITE:
time_now = self.clock.time_msec()
d["inviter"] = event.sender

invite_event = yield self.store.get_event(event.event_id)
d["invite"] = serialize_event(invite_event, time_now, as_client_event)

rooms_ret.append(d)

if event.membership not in (Membership.JOIN, Membership.LEAVE):
return

try:
if event.membership == Membership.JOIN:
room_end_token = now_token.room_key
deferred_room_state = self.state_handler.get_current_state(
event.room_id
)
elif event.membership == Membership.LEAVE:
room_end_token = "s%d" % (event.stream_ordering,)
deferred_room_state = self.store.get_state_for_events(
[event.event_id], None
)
deferred_room_state.addCallback(
lambda states: states[event.event_id]
)

(messages, token), current_state = yield preserve_context_over_deferred(
defer.gatherResults(
[
preserve_fn(self.store.get_recent_events_for_room)(
event.room_id,
limit=limit,
end_token=room_end_token,
),
deferred_room_state,
]
)
).addErrback(unwrapFirstError)

messages = yield filter_events_for_client(
self.store, user_id, messages
)

start_token = now_token.copy_and_replace("room_key", token[0])
end_token = now_token.copy_and_replace("room_key", token[1])
time_now = self.clock.time_msec()

d["messages"] = {
"chunk": [
serialize_event(m, time_now, as_client_event)
for m in messages
],
"start": start_token.to_string(),
"end": end_token.to_string(),
}

d["state"] = [
serialize_event(c, time_now, as_client_event)
for c in current_state.values()
]

account_data_events = []
tags = tags_by_room.get(event.room_id)
if tags:
account_data_events.append({
"type": "m.tag",
"content": {"tags": tags},
})

account_data = account_data_by_room.get(event.room_id, {})
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})

d["account_data"] = account_data_events
except:
logger.exception("Failed to get snapshot")

yield concurrently_execute(handle_room, room_list, 10)

account_data_events = []
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})

ret = {
"rooms": rooms_ret,
"presence": presence,
"account_data": account_data_events,
"receipts": receipt,
"end": now_token.to_string(),
}

defer.returnValue(ret)

@defer.inlineCallbacks
def room_initial_sync(self, requester, room_id, pagin_config=None):
"""Capture a snapshot of a room. If the user is currently a member of
the room this will be what is currently in the room. If the user left
the room this will be what was in the room when they left.

Args:
requester(Requester): The user to get a snapshot for.
room_id(str): The room to get a snapshot of.
pagin_config(synapse.streams.config.PaginationConfig):
The pagination config used to determine how many messages to
return.
Raises:
AuthError if the user wasn't in the room.
Returns:
A JSON serialisable dict with the snapshot of the room.
"""

user_id = requester.user.to_string()

membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id,
)
is_peeking = member_event_id is None

if membership == Membership.JOIN:
result = yield self._room_initial_sync_joined(
user_id, room_id, pagin_config, membership, is_peeking
)
elif membership == Membership.LEAVE:
result = yield self._room_initial_sync_parted(
user_id, room_id, pagin_config, membership, member_event_id, is_peeking
)

account_data_events = []
tags = yield self.store.get_tags_for_room(user_id, room_id)
if tags:
account_data_events.append({
"type": "m.tag",
"content": {"tags": tags},
})

account_data = yield self.store.get_account_data_for_room(user_id, room_id)
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})

result["account_data"] = account_data_events

defer.returnValue(result)

@defer.inlineCallbacks
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
membership, member_event_id, is_peeking):
room_state = yield self.store.get_state_for_events(
[member_event_id], None
)

room_state = room_state[member_event_id]

limit = pagin_config.limit if pagin_config else None
if limit is None:
limit = 10

stream_token = yield self.store.get_stream_token_for_event(
member_event_id
)

messages, token = yield self.store.get_recent_events_for_room(
room_id,
limit=limit,
end_token=stream_token
)

messages = yield filter_events_for_client(
self.store, user_id, messages, is_peeking=is_peeking
)

start_token = StreamToken.START.copy_and_replace("room_key", token[0])
end_token = StreamToken.START.copy_and_replace("room_key", token[1])

time_now = self.clock.time_msec()

defer.returnValue({
"membership": membership,
"room_id": room_id,
"messages": {
"chunk": [serialize_event(m, time_now) for m in messages],
"start": start_token.to_string(),
"end": end_token.to_string(),
},
"state": [serialize_event(s, time_now) for s in room_state.values()],
"presence": [],
"receipts": [],
})

@defer.inlineCallbacks
def _room_initial_sync_joined(self, user_id, room_id, pagin_config,
membership, is_peeking):
current_state = yield self.state.get_current_state(
room_id=room_id,
)

# TODO: These concurrently
time_now = self.clock.time_msec()
state = [
serialize_event(x, time_now)
for x in current_state.values()
]

now_token = yield self.hs.get_event_sources().get_current_token()

limit = pagin_config.limit if pagin_config else None
if limit is None:
limit = 10

room_members = [
m for m in current_state.values()
if m.type == EventTypes.Member
and m.content["membership"] == Membership.JOIN
]

presence_handler = self.hs.get_presence_handler()

@defer.inlineCallbacks
def get_presence():
states = yield presence_handler.get_states(
[m.user_id for m in room_members],
as_event=True,
)

defer.returnValue(states)

@defer.inlineCallbacks
def get_receipts():
receipts = yield self.store.get_linearized_receipts_for_room(
room_id,
to_key=now_token.receipt_key,
)
if not receipts:
receipts = []
defer.returnValue(receipts)

presence, receipts, (messages, token) = yield defer.gatherResults(
[
preserve_fn(get_presence)(),
preserve_fn(get_receipts)(),
preserve_fn(self.store.get_recent_events_for_room)(
room_id,
limit=limit,
end_token=now_token.room_key,
)
],
consumeErrors=True,
).addErrback(unwrapFirstError)

messages = yield filter_events_for_client(
self.store, user_id, messages, is_peeking=is_peeking,
)

start_token = now_token.copy_and_replace("room_key", token[0])
end_token = now_token.copy_and_replace("room_key", token[1])

time_now = self.clock.time_msec()

ret = {
"room_id": room_id,
"messages": {
"chunk": [serialize_event(m, time_now) for m in messages],
"start": start_token.to_string(),
"end": end_token.to_string(),
},
"state": state,
"presence": presence,
"receipts": receipts,
}
if not is_peeking:
ret["membership"] = membership

defer.returnValue(ret)

@defer.inlineCallbacks
def _check_in_room_or_world_readable(self, room_id, user_id):
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
return
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if (
visibility and
visibility.content["history_visibility"] == "world_readable"
):
defer.returnValue((Membership.JOIN, None))
return
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
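_check_in_room_or_world_readable above gates peeking: a membership event wins, otherwise the room's m.room.history_visibility state must be world_readable. A reduced sketch of just the visibility test (can_peek is a hypothetical helper, not part of the file):

@defer.inlineCallbacks
def can_peek(state_handler, room_id):
    # Look up the current m.room.history_visibility state event, if any.
    visibility = yield state_handler.get_current_state(
        room_id, EventTypes.RoomHistoryVisibility, ""
    )
    defer.returnValue(
        visibility is not None and
        visibility.content["history_visibility"] == "world_readable"
    )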
synapse/handlers/message.py
@@ -16,19 +16,17 @@
from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.api.errors import AuthError, Codes, SynapseError, LimitExceededError
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.push.action_generator import ActionGenerator
from synapse.streams.config import PaginationConfig
from synapse.types import (
UserID, RoomAlias, RoomStreamToken, StreamToken, get_domain_from_id
UserID, RoomAlias, RoomStreamToken,
)
from synapse.util import unwrapFirstError
from synapse.util.async import concurrently_execute, run_on_reactor, ReadWriteLock
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter
from synapse.util.logcontext import preserve_fn
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client

from ._base import BaseHandler
@@ -36,6 +34,7 @@ from ._base import BaseHandler
from canonicaljson import encode_canonical_json

import logging
import random

logger = logging.getLogger(__name__)

@@ -48,10 +47,13 @@ class MessageHandler(BaseHandler):
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()

self.pagination_lock = ReadWriteLock()

# We arbitrarily limit concurrent event creation for a room to 5.
# This is to stop us from diverging history *too* much.
self.limiter = Limiter(max_count=5)
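The new Limiter serialises work per key: at most max_count callers hold a given room's slot at once, the rest queue behind them. A sketch of how the event-creation hunk further down uses it (create_limited and do_create are illustrative stand-ins):

@defer.inlineCallbacks
def create_limited(limiter, room_id, do_create):
    # Blocks until one of the room's five slots is free, then holds it
    # for the duration of the with-block.
    with (yield limiter.queue(room_id)):
        result = yield do_create()
    defer.returnValue(result)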
@defer.inlineCallbacks
def purge_history(self, room_id, event_id):
event = yield self.store.get_event(event_id)
@@ -66,7 +68,7 @@ class MessageHandler(BaseHandler):

@defer.inlineCallbacks
def get_messages(self, requester, room_id=None, pagin_config=None,
as_client_event=True):
as_client_event=True, event_filter=None):
"""Get messages in a room.

Args:
@@ -75,18 +77,18 @@ class MessageHandler(BaseHandler):
pagin_config (synapse.api.streams.PaginationConfig): The pagination
config rules to apply, if any.
as_client_event (bool): True to get events in client-server format.
event_filter (Filter): Filter to apply to results or None
Returns:
dict: Pagination API results
"""
user_id = requester.user.to_string()
data_source = self.hs.get_event_sources().sources["room"]

if pagin_config.from_token:
room_token = pagin_config.from_token.room_key
else:
pagin_config.from_token = (
yield self.hs.get_event_sources().get_current_token(
direction='b'
yield self.hs.get_event_sources().get_current_token_for_room(
room_id=room_id
)
)
room_token = pagin_config.from_token.room_key
@@ -129,8 +131,13 @@ class MessageHandler(BaseHandler):
room_id, max_topo
)

events, next_key = yield data_source.get_pagination_rows(
requester.user, source_config, room_id
events, next_key = yield self.store.paginate_room_events(
room_id=room_id,
from_key=source_config.from_key,
to_key=source_config.to_key,
direction=source_config.direction,
limit=source_config.limit,
event_filter=event_filter,
)

next_token = pagin_config.from_token.copy_and_replace(
@@ -144,6 +151,9 @@ class MessageHandler(BaseHandler):
"end": next_token.to_string(),
})

if event_filter:
events = event_filter.filter(events)

events = yield filter_events_for_client(
self.store,
user_id,
@@ -185,36 +195,40 @@ class MessageHandler(BaseHandler):
"""
builder = self.event_builder_factory.new(event_dict)

self.validator.validate_new(builder)
with (yield self.limiter.queue(builder.room_id)):
self.validator.validate_new(builder)

if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
target = UserID.from_string(builder.state_key)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
target = UserID.from_string(builder.state_key)

if membership in {Membership.JOIN, Membership.INVITE}:
# If event doesn't include a display name, add one.
profile = self.hs.get_handlers().profile_handler
content = builder.content
if membership in {Membership.JOIN, Membership.INVITE}:
# If event doesn't include a display name, add one.
profile = self.hs.get_handlers().profile_handler
content = builder.content

try:
content["displayname"] = yield profile.get_displayname(target)
content["avatar_url"] = yield profile.get_avatar_url(target)
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s",
target, e
)
try:
if "displayname" not in content:
content["displayname"] = yield profile.get_displayname(target)
if "avatar_url" not in content:
content["avatar_url"] = yield profile.get_avatar_url(target)
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s",
target, e
)

if token_id is not None:
builder.internal_metadata.token_id = token_id
if token_id is not None:
builder.internal_metadata.token_id = token_id

if txn_id is not None:
builder.internal_metadata.txn_id = txn_id
if txn_id is not None:
builder.internal_metadata.txn_id = txn_id

event, context = yield self._create_new_client_event(
builder=builder,
prev_event_ids=prev_event_ids,
)

event, context = yield self._create_new_client_event(
builder=builder,
prev_event_ids=prev_event_ids,
)
defer.returnValue((event, context))

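The rate-limit hunk below checks the limiter twice: once early with update=False (no message is recorded, so rejected requests do no further work) and once for real just before the event is actually sent. A sketch of the early check as a free function (check_rate_limit is a hypothetical name; the ratelimiter call mirrors the diff):

def check_rate_limit(ratelimiter, clock, config, sender):
    time_now = clock.time()
    allowed, time_allowed = ratelimiter.send_message(
        sender, time_now,
        msg_rate_hz=config.rc_messages_per_second,
        burst_count=config.rc_message_burst_count,
        update=False,  # peek only; don't record this message yet
    )
    if not allowed:
        raise LimitExceededError(
            retry_after_ms=int(1000 * (time_allowed - time_now)),
        )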
@defer.inlineCallbacks
@@ -234,12 +248,27 @@ class MessageHandler(BaseHandler):
"Tried to send member event through non-member codepath"
)

# We check here if we are currently being rate limited, so that we
# don't do unnecessary work. We check again just before we actually
# send the event.
time_now = self.clock.time()
allowed, time_allowed = self.ratelimiter.send_message(
event.sender, time_now,
msg_rate_hz=self.hs.config.rc_messages_per_second,
burst_count=self.hs.config.rc_message_burst_count,
update=False,
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now)),
)

user = UserID.from_string(event.sender)

assert self.hs.is_mine(user), "User must be our own: %s" % (user,)

if event.is_state():
prev_state = self.deduplicate_state_event(event, context)
prev_state = yield self.deduplicate_state_event(event, context)
if prev_state is not None:
defer.returnValue(prev_state)

@@ -252,8 +281,11 @@ class MessageHandler(BaseHandler):

if event.type == EventTypes.Message:
presence = self.hs.get_presence_handler()
yield presence.bump_presence_active_time(user)
# We don't want to block sending messages on any presence code. This
# matters as sometimes presence code can take a while.
preserve_fn(presence.bump_presence_active_time)(user)

@defer.inlineCallbacks
def deduplicate_state_event(self, event, context):
"""
Checks whether event is in the latest resolved state in context.
@@ -261,13 +293,17 @@ class MessageHandler(BaseHandler):
If so, returns the version of the event in context.
Otherwise, returns None.
"""
prev_event = context.current_state.get((event.type, event.state_key))
prev_event_id = context.prev_state_ids.get((event.type, event.state_key))
prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
if not prev_event:
return

if prev_event and event.user_id == prev_event.user_id:
prev_content = encode_canonical_json(prev_event.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
return prev_event
return None
defer.returnValue(prev_event)
return

@defer.inlineCallbacks
def create_and_send_nonmember_event(
@@ -378,375 +414,7 @@ class MessageHandler(BaseHandler):
[serialize_event(c, now) for c in room_state.values()]
)

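The reworked deduplicate_state_event above treats a state event as a no-op when the same sender already has a byte-identical event (compared as canonical JSON) at that (type, state_key) in the prev state. The comparison rule in isolation (is_duplicate is a hypothetical helper, not in the diff):

from canonicaljson import encode_canonical_json


def is_duplicate(event, prev_event):
    # prev_event: the event currently at (event.type, event.state_key), or None.
    return (
        prev_event is not None
        and event.user_id == prev_event.user_id
        and encode_canonical_json(prev_event.content)
        == encode_canonical_json(event.content)
    )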
def snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
[... ~370 removed lines elided: snapshot_all_rooms, _snapshot_all_rooms, room_initial_sync, _room_initial_sync_parted and _room_initial_sync_joined are deleted from MessageHandler here. The removed code is essentially the same as the new InitialSyncHandler in synapse/handlers/initial_sync.py shown above, except that the old copy used plain defer.gatherResults (no preserve_fn wrappers) and fetched receipts via receipts_handler.get_receipts_for_room ...]
@measure_func("_create_new_client_event")
|
||||
@defer.inlineCallbacks
|
||||
def _create_new_client_event(self, builder, prev_event_ids=None):
|
||||
if prev_event_ids:
|
||||
@@ -758,6 +426,20 @@ class MessageHandler(BaseHandler):
                builder.room_id,
            )

        # We want to limit the max number of prev events we point to in our
        # new event
        if len(latest_ret) > 10:
            # Sort by reverse depth, so we point to the most recent.
            latest_ret.sort(key=lambda a: -a[2])
            new_latest_ret = latest_ret[:5]

            # We also randomly point to some of the older events, to make
            # sure that we don't completely ignore the older events.
            if latest_ret[5:]:
                sample_size = min(5, len(latest_ret[5:]))
                new_latest_ret.extend(random.sample(latest_ret[5:], sample_size))
            latest_ret = new_latest_ret

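The hunk above caps how many forward extremities a new event references: keep the five deepest (most recent) and randomly sample up to five of the rest so old extremities are not starved forever. A standalone sketch of that selection step, not part of the diff, assuming the same (event_id, hashes, depth) tuples:

import random

def cap_prev_events(latest_ret, keep_recent=5, sample_older=5, threshold=10):
    # latest_ret is a list of (event_id, hashes, depth) tuples.
    if len(latest_ret) <= threshold:
        return latest_ret
    # Sort by reverse depth so the most recent extremities come first.
    latest_ret = sorted(latest_ret, key=lambda a: -a[2])
    capped = latest_ret[:keep_recent]
    older = latest_ret[keep_recent:]
    # Randomly keep a few older extremities so they still get referenced.
    capped.extend(random.sample(older, min(sample_older, len(older))))
    return capped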
        if latest_ret:
            depth = max([d for _, _, d in latest_ret]) + 1
        else:
@@ -790,14 +472,15 @@ class MessageHandler(BaseHandler):
        event = builder.build()

        logger.debug(
            "Created event %s with current state: %s",
            event.event_id, context.current_state,
            "Created event %s with state: %s",
            event.event_id, context.prev_state_ids,
        )

        defer.returnValue(
            (event, context,)
        )

    @measure_func("handle_new_client_event")
    @defer.inlineCallbacks
    def handle_new_client_event(
        self,
@@ -813,12 +496,12 @@ class MessageHandler(BaseHandler):
        self.ratelimit(requester)

        try:
            self.auth.check(event, auth_events=context.current_state)
            yield self.auth.check_from_context(event, context)
        except AuthError as err:
            logger.warn("Denying new event %r because %s", event, err)
            raise err

        yield self.maybe_kick_guest_users(event, context.current_state.values())
        yield self.maybe_kick_guest_users(event, context)

        if event.type == EventTypes.CanonicalAlias:
            # Check the alias is actually valid (at this time at least)
@@ -846,6 +529,15 @@ class MessageHandler(BaseHandler):
                    e.sender == event.sender
                )

                state_to_include_ids = [
                    e_id
                    for k, e_id in context.current_state_ids.items()
                    if k[0] in self.hs.config.room_invite_state_types
                    or k[0] == EventTypes.Member and k[1] == event.sender
                ]

                state_to_include = yield self.store.get_events(state_to_include_ids)

                event.unsigned["invite_room_state"] = [
                    {
                        "type": e.type,
@@ -853,9 +545,7 @@ class MessageHandler(BaseHandler):
                        "content": e.content,
                        "sender": e.sender,
                    }
                    for k, e in context.current_state.items()
                    if e.type in self.hs.config.room_invite_state_types
                    or is_inviter_member_event(e)
                    for e in state_to_include.values()
                ]

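The change above first collects the event IDs of the state worth embedding (the configured room_invite_state_types plus the inviter's membership), fetches those events, then serializes a stripped summary into unsigned["invite_room_state"]. A minimal sketch of the stripping step, assuming simple event objects with the attributes visible in the diff (the elided hunk presumably also includes the state_key field):

def strip_invite_state(events):
    # Reduce full state events to the minimal fields an invited client needs.
    return [
        {
            "type": e.type,
            "state_key": e.state_key,
            "content": e.content,
            "sender": e.sender,
        }
        for e in events
    ]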
                invitee = UserID.from_string(event.state_key)
@@ -877,7 +567,14 @@ class MessageHandler(BaseHandler):
            )

        if event.type == EventTypes.Redaction:
            if self.auth.check_redaction(event, auth_events=context.current_state):
            auth_events_ids = yield self.auth.compute_auth_events(
                event, context.prev_state_ids, for_verification=True,
            )
            auth_events = yield self.store.get_events(auth_events_ids)
            auth_events = {
                (e.type, e.state_key): e for e in auth_events.values()
            }
            if self.auth.check_redaction(event, auth_events=auth_events):
                original_event = yield self.store.get_event(
                    event.redacts,
                    check_redacted=False,
@@ -891,7 +588,7 @@ class MessageHandler(BaseHandler):
                    "You don't have permission to redact events"
                )

        if event.type == EventTypes.Create and context.current_state:
        if event.type == EventTypes.Create and context.prev_state_ids:
            raise AuthError(
                403,
                "Changing the room create event is forbidden",
@@ -912,21 +609,10 @@ class MessageHandler(BaseHandler):
            event_stream_id, max_stream_id
        )

        destinations = set()
        for k, s in context.current_state.items():
            try:
                if k[0] == EventTypes.Member:
                    if s.content["membership"] == Membership.JOIN:
                        destinations.add(get_domain_from_id(s.state_key))
            except SynapseError:
                logger.warn(
                    "Failed to get destination from event %s", s.event_id
                )

        @defer.inlineCallbacks
        def _notify():
            yield run_on_reactor()
            self.notifier.on_new_room_event(
            yield self.notifier.on_new_room_event(
                event, event_stream_id, max_stream_id,
                extra_users=extra_users
            )
@@ -935,7 +621,3 @@ class MessageHandler(BaseHandler):

        # If invite, remove room_state from unsigned before sending.
        event.unsigned.pop("invite_room_state", None)

        federation_handler.handle_new_event(
            event, destinations=destinations,
        )
@@ -52,6 +52,11 @@ bump_active_time_counter = metrics.register_counter("bump_active_time")

get_updates_counter = metrics.register_counter("get_updates", labels=["type"])

notify_reason_counter = metrics.register_counter("notify_reason", labels=["reason"])
state_transition_counter = metrics.register_counter(
    "state_transition", labels=["from", "to"]
)


# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
@@ -86,26 +91,29 @@ class PresenceHandler(object):
        self.store = hs.get_datastore()
        self.wheel_timer = WheelTimer()
        self.notifier = hs.get_notifier()
        self.federation = hs.get_replication_layer()
        self.replication = hs.get_replication_layer()
        self.federation = hs.get_federation_sender()

        self.federation.register_edu_handler(
        self.state = hs.get_state_handler()

        self.replication.register_edu_handler(
            "m.presence", self.incoming_presence
        )
        self.federation.register_edu_handler(
        self.replication.register_edu_handler(
            "m.presence_invite",
            lambda origin, content: self.invite_presence(
                observed_user=UserID.from_string(content["observed_user"]),
                observer_user=UserID.from_string(content["observer_user"]),
            )
        )
        self.federation.register_edu_handler(
        self.replication.register_edu_handler(
            "m.presence_accept",
            lambda origin, content: self.accept_presence(
                observed_user=UserID.from_string(content["observed_user"]),
                observer_user=UserID.from_string(content["observer_user"]),
            )
        )
        self.federation.register_edu_handler(
        self.replication.register_edu_handler(
            "m.presence_deny",
            lambda origin, content: self.deny_presence(
                observed_user=UserID.from_string(content["observed_user"]),
@@ -189,6 +197,13 @@ class PresenceHandler(object):
            5000,
        )

        self.clock.call_later(
            60,
            self.clock.looping_call,
            self._persist_unpersisted_changes,
            60 * 1000,
        )

        metrics.register_callback("wheel_timer_size", lambda: len(self.wheel_timer))

    @defer.inlineCallbacks
@@ -203,7 +218,7 @@ class PresenceHandler(object):
        is some spurious presence changes that will self-correct.
        """
        logger.info(
            "Performing _on_shutdown. Persiting %d unpersisted changes",
            "Performing _on_shutdown. Persisting %d unpersisted changes",
            len(self.user_to_current_state)
        )

@@ -214,6 +229,27 @@ class PresenceHandler(object):
        ])
        logger.info("Finished _on_shutdown")

    @defer.inlineCallbacks
    def _persist_unpersisted_changes(self):
        """We periodically persist the unpersisted changes, as otherwise they
        may stack up and slow down shutdown times.
        """
        logger.info(
            "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes",
            len(self.unpersisted_users_changes)
        )

        unpersisted = self.unpersisted_users_changes
        self.unpersisted_users_changes = set()

        if unpersisted:
            yield self.store.update_presence([
                self.user_to_current_state[user_id]
                for user_id in unpersisted
            ])

        logger.info("Finished _persist_unpersisted_changes")

    @defer.inlineCallbacks
    def _update_states(self, new_states):
        """Updates presence of users. Sets the appropriate timeouts. Pokes
@@ -230,6 +266,12 @@ class PresenceHandler(object):
        to_notify = {}  # Changes we want to notify everyone about
        to_federation_ping = {}  # These need sending keep-alives

        # Only bother handling the last presence change for each user
        new_states_dict = {}
        for new_state in new_states:
            new_states_dict[new_state.user_id] = new_state
        new_state = new_states_dict.values()
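The five lines just added collapse the incoming batch so only the latest change per user survives (note the diff as written assigns the deduplicated result to new_state, while the loop below still iterates new_states). A standalone sketch of the dedup idea, assuming objects with a user_id attribute:

def dedupe_presence_updates(new_states):
    # Keep only the most recent update for each user; later entries win.
    latest = {}
    for state in new_states:
        latest[state.user_id] = state
    return list(latest.values())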

        for new_state in new_states:
            user_id = new_state.user_id

@@ -503,7 +545,7 @@ class PresenceHandler(object):
        defer.returnValue(states)

    @defer.inlineCallbacks
    def _get_interested_parties(self, states):
    def _get_interested_parties(self, states, calculate_remote_hosts=True):
        """Given a list of states return which entities (rooms, users, servers)
        are interested in the given states.

@@ -526,14 +568,17 @@ class PresenceHandler(object):
            users_to_states.setdefault(state.user_id, []).append(state)

        hosts_to_states = {}
        for room_id, states in room_ids_to_states.items():
            local_states = filter(lambda s: self.is_mine_id(s.user_id), states)
            if not local_states:
                continue
        if calculate_remote_hosts:
            for room_id, states in room_ids_to_states.items():
                local_states = filter(lambda s: self.is_mine_id(s.user_id), states)
                if not local_states:
                    continue

            hosts = yield self.store.get_joined_hosts_for_room(room_id)
            for host in hosts:
                hosts_to_states.setdefault(host, []).extend(local_states)
                users = yield self.store.get_users_in_room(room_id)
                hosts = set(get_domain_from_id(u) for u in users)

                for host in hosts:
                    hosts_to_states.setdefault(host, []).extend(local_states)

        for user_id, states in users_to_states.items():
            local_states = filter(lambda s: self.is_mine_id(s.user_id), states)
@@ -565,24 +610,24 @@ class PresenceHandler(object):

        self._push_to_remotes(hosts_to_states)

    @defer.inlineCallbacks
    def notify_for_states(self, state, stream_id):
        parties = yield self._get_interested_parties([state])
        room_ids_to_states, users_to_states, hosts_to_states = parties

        self.notifier.on_new_event(
            "presence_key", stream_id, rooms=room_ids_to_states.keys(),
            users=[UserID.from_string(u) for u in users_to_states.keys()]
        )

    def _push_to_remotes(self, hosts_to_states):
        """Sends state updates to remote servers.

        Args:
            hosts_to_states (dict): Mapping `server_name` -> `[UserPresenceState]`
        """
        now = self.clock.time_msec()
        for host, states in hosts_to_states.items():
            self.federation.send_edu(
                destination=host,
                edu_type="m.presence",
                content={
                    "push": [
                        _format_user_presence_state(state, now)
                        for state in states
                    ]
                }
            )
            self.federation.send_presence(host, states)

    @defer.inlineCallbacks
    def incoming_presence(self, origin, content):
@@ -603,6 +648,13 @@ class PresenceHandler(object):
                )
                continue

            if get_domain_from_id(user_id) != origin:
                logger.info(
                    "Got presence update from %r with bad 'user_id': %r",
                    origin, user_id,
                )
                continue

            presence_state = push.get("presence", None)
            if not presence_state:
                logger.info(
@@ -662,17 +714,17 @@ class PresenceHandler(object):
            defer.returnValue([
                {
                    "type": "m.presence",
                    "content": _format_user_presence_state(state, now),
                    "content": format_user_presence_state(state, now),
                }
                for state in updates
            ])
        else:
            defer.returnValue([
                _format_user_presence_state(state, now) for state in updates
                format_user_presence_state(state, now) for state in updates
            ])

    @defer.inlineCallbacks
    def set_state(self, target_user, state):
    def set_state(self, target_user, state, ignore_status_msg=False):
        """Set the presence state of the user.
        """
        status_msg = state.get("status_msg", None)
@@ -689,10 +741,13 @@ class PresenceHandler(object):
        prev_state = yield self.current_state_for_user(user_id)

        new_fields = {
            "state": presence,
            "status_msg": status_msg if presence != PresenceState.OFFLINE else None
            "state": presence
        }

        if not ignore_status_msg:
            msg = status_msg if presence != PresenceState.OFFLINE else None
            new_fields["status_msg"] = msg

        if presence == PresenceState.ONLINE:
            new_fields["last_active_ts"] = self.clock.time_msec()

@@ -711,13 +766,13 @@ class PresenceHandler(object):
        # don't need to send to local clients here, as that is done as part
        # of the event stream/sync.
        # TODO: Only send to servers not already in the room.
        user_ids = yield self.store.get_users_in_room(room_id)
        if self.is_mine(user):
            state = yield self.current_state_for_user(user.to_string())

            hosts = yield self.store.get_joined_hosts_for_room(room_id)
            hosts = set(get_domain_from_id(u) for u in user_ids)
            self._push_to_remotes({host: (state,) for host in hosts})
        else:
            user_ids = yield self.store.get_users_in_room(room_id)
            user_ids = filter(self.is_mine_id, user_ids)

            states = yield self.current_state_for_users(user_ids)
@@ -893,28 +948,38 @@ class PresenceHandler(object):
def should_notify(old_state, new_state):
    """Decides if a presence state change should be sent to interested parties.
    """
    if old_state == new_state:
        return False

    if old_state.status_msg != new_state.status_msg:
        return True

    if old_state.state == PresenceState.ONLINE:
        if new_state.state != PresenceState.ONLINE:
            # Always notify for online -> anything
            return True

        if new_state.currently_active != old_state.currently_active:
            return True

    if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
        # Always notify for a transition where last active gets bumped.
        notify_reason_counter.inc("status_msg_change")
        return True

    if old_state.state != new_state.state:
        notify_reason_counter.inc("state_change")
        state_transition_counter.inc(old_state.state, new_state.state)
        return True

    if old_state.state == PresenceState.ONLINE:
        if new_state.currently_active != old_state.currently_active:
            notify_reason_counter.inc("current_active_change")
            return True

        if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
            # Only notify about last active bumps if we're not currently active
            if not new_state.currently_active:
                notify_reason_counter.inc("last_active_change_online")
                return True

    elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
        # Always notify for a transition where last active gets bumped.
        notify_reason_counter.inc("last_active_change_not_online")
        return True

    return False
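The rewritten should_notify above now both decides whether to fan an update out and counts why, via notify_reason_counter and state_transition_counter. A self-contained approximation that runs standalone (plain strings for presence states, a Counter standing in for the metrics, and an assumed LAST_ACTIVE_GRANULARITY value):

from collections import Counter, namedtuple

LAST_ACTIVE_GRANULARITY = 60 * 1000  # illustrative value, not Synapse's

State = namedtuple("State", ["state", "status_msg", "currently_active", "last_active_ts"])
reasons = Counter()

def should_notify(old, new):
    if old == new:
        return False
    if old.status_msg != new.status_msg:
        reasons["status_msg_change"] += 1
        return True
    if old.state != new.state:
        reasons["state_change"] += 1
        return True
    if old.state == "online":
        if new.currently_active != old.currently_active:
            reasons["current_active_change"] += 1
            return True
        if new.last_active_ts - old.last_active_ts > LAST_ACTIVE_GRANULARITY:
            # Only notify about last-active bumps if not currently active.
            if not new.currently_active:
                reasons["last_active_change_online"] += 1
                return True
    elif new.last_active_ts - old.last_active_ts > LAST_ACTIVE_GRANULARITY:
        reasons["last_active_change_not_online"] += 1
        return True
    return False

old = State("online", None, True, 0)
new = State("unavailable", None, False, 0)
assert should_notify(old, new)  # a state change always notifies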


def _format_user_presence_state(state, now):
def format_user_presence_state(state, now):
    """Convert UserPresenceState to a format that can be sent down to clients
    and to other servers.
    """
@@ -941,11 +1006,12 @@ class PresenceEventSource(object):
        self.get_presence_handler = hs.get_presence_handler
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()

    @defer.inlineCallbacks
    @log_function
    def get_new_events(self, user, from_key, room_ids=None, include_offline=True,
                       **kwargs):
                       explicit_room_id=None, **kwargs):
        # The process for getting presence events are:
        # 1. Get the rooms the user is in.
        # 2. Get the list of user in the rooms.
@@ -962,22 +1028,24 @@ class PresenceEventSource(object):
        user_id = user.to_string()
        if from_key is not None:
            from_key = int(from_key)
        room_ids = room_ids or []

        presence = self.get_presence_handler()
        stream_change_cache = self.store.presence_stream_cache

        if not room_ids:
            rooms = yield self.store.get_rooms_for_user(user_id)
            room_ids = set(e.room_id for e in rooms)
        else:
            room_ids = set(room_ids)

        max_token = self.store.get_current_presence_token()

        plist = yield self.store.get_presence_list_accepted(user.localpart)
        friends = set(row["observed_user_id"] for row in plist)
        friends.add(user_id)  # So that we receive our own presence
        users_interested_in = set(row["observed_user_id"] for row in plist)
        users_interested_in.add(user_id)  # So that we receive our own presence

        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id
        )
        users_interested_in.update(users_who_share_room)

        if explicit_room_id:
            user_ids = yield self.store.get_users_in_room(explicit_room_id)
            users_interested_in.update(user_ids)

        user_ids_changed = set()
        changed = None
@@ -989,35 +1057,19 @@ class PresenceEventSource(object):
            # work out if we share a room or they're in our presence list
            get_updates_counter.inc("stream")
            for other_user_id in changed:
                if other_user_id in friends:
                if other_user_id in users_interested_in:
                    user_ids_changed.add(other_user_id)
                    continue
                other_rooms = yield self.store.get_rooms_for_user(other_user_id)
                if room_ids.intersection(e.room_id for e in other_rooms):
                    user_ids_changed.add(other_user_id)
                    continue
        else:
            # Too many possible updates. Find all users we can see and check
            # if any of them have changed.
            get_updates_counter.inc("full")

            user_ids_to_check = set()
            for room_id in room_ids:
                users = yield self.store.get_users_in_room(room_id)
                user_ids_to_check.update(users)

            user_ids_to_check.update(friends)

            # Always include yourself. Only really matters for when the user is
            # not in any rooms, but still.
            user_ids_to_check.add(user_id)

            if from_key:
                user_ids_changed = stream_change_cache.get_entities_changed(
                    user_ids_to_check, from_key,
                    users_interested_in, from_key,
                )
            else:
                user_ids_changed = user_ids_to_check
                user_ids_changed = users_interested_in

        updates = yield presence.current_state_for_users(user_ids_changed)

@@ -1026,7 +1078,7 @@ class PresenceEventSource(object):
        defer.returnValue(([
            {
                "type": "m.presence",
                "content": _format_user_presence_state(s, now),
                "content": format_user_presence_state(s, now),
            }
            for s in updates.values()
            if include_offline or s.state != PresenceState.OFFLINE

@@ -13,15 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

import synapse.types
from synapse.api.errors import SynapseError, AuthError, CodeMessageException
from synapse.types import UserID, Requester

from synapse.types import UserID
from ._base import BaseHandler

import logging


logger = logging.getLogger(__name__)

@@ -65,13 +65,13 @@ class ProfileHandler(BaseHandler):
        defer.returnValue(result["displayname"])

    @defer.inlineCallbacks
    def set_displayname(self, target_user, requester, new_displayname):
    def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
        """target_user is the user whose displayname is to be changed;
        auth_user is the user attempting to make this change."""
        if not self.hs.is_mine(target_user):
            raise SynapseError(400, "User is not hosted on this Home Server")

        if target_user != requester.user:
        if not by_admin and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's displayname")

        if new_displayname == '':
@@ -111,13 +111,13 @@ class ProfileHandler(BaseHandler):
        defer.returnValue(result["avatar_url"])

    @defer.inlineCallbacks
    def set_avatar_url(self, target_user, requester, new_avatar_url):
    def set_avatar_url(self, target_user, requester, new_avatar_url, by_admin=False):
        """target_user is the user whose avatar_url is to be changed;
        auth_user is the user attempting to make this change."""
        if not self.hs.is_mine(target_user):
            raise SynapseError(400, "User is not hosted on this Home Server")

        if target_user != requester.user:
        if not by_admin and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's avatar_url")

        yield self.store.set_profile_avatar_url(
@@ -165,7 +165,9 @@ class ProfileHandler(BaseHandler):
            try:
                # Assume the user isn't a guest because we don't let guests set
                # profile or avatar data.
                requester = Requester(user, "", False)
                # XXX why are we recreating `requester` here for each room?
                # what was wrong with the `requester` we were passed?
                requester = synapse.types.create_requester(user)
                yield handler.update_membership(
                    requester,
                    user,

@@ -18,6 +18,7 @@ from ._base import BaseHandler
from twisted.internet import defer

from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import get_domain_from_id

import logging

@@ -32,11 +33,12 @@ class ReceiptsHandler(BaseHandler):
        self.server_name = hs.config.server_name
        self.store = hs.get_datastore()
        self.hs = hs
        self.federation = hs.get_replication_layer()
        self.federation.register_edu_handler(
        self.federation = hs.get_federation_sender()
        hs.get_replication_layer().register_edu_handler(
            "m.receipt", self._received_remote_receipt
        )
        self.clock = self.hs.get_clock()
        self.state = hs.get_state_handler()

    @defer.inlineCallbacks
    def received_client_receipt(self, room_id, receipt_type, user_id,
@@ -98,7 +100,7 @@ class ReceiptsHandler(BaseHandler):

            if not res:
                # res will be None if this read receipt is 'old'
                defer.returnValue(False)
                continue

            stream_id, max_persisted_id = res

@@ -107,6 +109,10 @@ class ReceiptsHandler(BaseHandler):
            if max_batch_id is None or max_persisted_id > max_batch_id:
                max_batch_id = max_persisted_id

        if min_batch_id is None:
            # no new receipts
            defer.returnValue(False)

        affected_room_ids = list(set([r["room_id"] for r in receipts]))

        with PreserveLoggingContext():
@@ -133,7 +139,8 @@ class ReceiptsHandler(BaseHandler):
            event_ids = receipt["event_ids"]
            data = receipt["data"]

            remotedomains = yield self.store.get_joined_hosts_for_room(room_id)
            users = yield self.state.get_current_user_in_room(room_id)
            remotedomains = set(get_domain_from_id(u) for u in users)
            remotedomains = remotedomains.copy()
            remotedomains.discard(self.server_name)

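Several hunks in this change replace get_joined_hosts_for_room with deriving the remote servers from the joined users' IDs. That works because a Matrix user ID embeds its homeserver after the first colon; a minimal stand-in for get_domain_from_id under that assumption:

def get_domain_from_id(user_id):
    # "@alice:example.org" -> "example.org"
    return user_id.split(":", 1)[1]

users = ["@alice:example.org", "@bob:matrix.org", "@carol:example.org"]
remote_domains = set(get_domain_from_id(u) for u in users)
remote_domains.discard("example.org")  # drop our own server_name
assert remote_domains == {"matrix.org"}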
@@ -153,6 +160,7 @@ class ReceiptsHandler(BaseHandler):
                        }
                    },
                },
                key=(room_id, receipt_type, user_id),
            )

    @defer.inlineCallbacks

@@ -14,18 +14,18 @@
# limitations under the License.

"""Contains functions for registering clients."""
import logging
import urllib

from twisted.internet import defer

from synapse.types import UserID, Requester
from synapse.api.errors import (
    AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
)
from ._base import BaseHandler
from synapse.util.async import run_on_reactor
from synapse.http.client import CaptchaServerHttpClient

import logging
import urllib
from synapse.types import UserID
from synapse.util.async import run_on_reactor
from ._base import BaseHandler

logger = logging.getLogger(__name__)

@@ -40,6 +40,8 @@ class RegistrationHandler(BaseHandler):

        self._next_generated_user_id = None

        self.macaroon_gen = hs.get_macaroon_generator()

    @defer.inlineCallbacks
    def check_username(self, localpart, guest_access_token=None,
                       assigned_user_id=None):
@@ -52,6 +54,13 @@ class RegistrationHandler(BaseHandler):
                Codes.INVALID_USERNAME
            )

        if localpart[0] == '_':
            raise SynapseError(
                400,
                "User ID may not begin with _",
                Codes.INVALID_USERNAME
            )

        user = UserID(localpart, self.hs.hostname)
        user_id = user.to_string()

@@ -74,7 +83,7 @@ class RegistrationHandler(BaseHandler):
                "User ID already taken.",
                errcode=Codes.USER_IN_USE,
            )
            user_data = yield self.auth.get_user_from_macaroon(guest_access_token)
            user_data = yield self.auth.get_user_by_access_token(guest_access_token)
            if not user_data["is_guest"] or user_data["user"].localpart != localpart:
                raise AuthError(
                    403,
@@ -99,8 +108,13 @@ class RegistrationHandler(BaseHandler):
            localpart : The local part of the user ID to register. If None,
              one will be generated.
            password (str) : The password to assign to this user so they can
            login again. This can be None which means they cannot login again
            via a password (e.g. the user is an application service user).
              login again. This can be None which means they cannot login again
              via a password (e.g. the user is an application service user).
            generate_token (bool): Whether a new access token should be
              generated. Having this be True should be considered deprecated,
              since it offers no means of associating a device_id with the
              access_token. Instead you should call auth_handler.issue_access_token
              after registration.
        Returns:
            A tuple of (user_id, access_token).
        Raises:
@@ -131,7 +145,7 @@ class RegistrationHandler(BaseHandler):

            token = None
            if generate_token:
                token = self.auth_handler().generate_access_token(user_id)
                token = self.macaroon_gen.generate_access_token(user_id)
            yield self.store.register(
                user_id=user_id,
                token=token,
@@ -155,7 +169,7 @@ class RegistrationHandler(BaseHandler):
                user_id = user.to_string()
                yield self.check_user_id_not_appservice_exclusive(user_id)
                if generate_token:
                    token = self.auth_handler().generate_access_token(user_id)
                    token = self.macaroon_gen.generate_access_token(user_id)
                try:
                    yield self.store.register(
                        user_id=user_id,
@@ -181,7 +195,7 @@ class RegistrationHandler(BaseHandler):
    def appservice_register(self, user_localpart, as_token):
        user = UserID(user_localpart, self.hs.hostname)
        user_id = user.to_string()
        service = yield self.store.get_app_service_by_token(as_token)
        service = self.store.get_app_service_by_token(as_token)
        if not service:
            raise AuthError(403, "Invalid application service token.")
        if not service.is_interested_in_user(user_id):
@@ -196,15 +210,13 @@ class RegistrationHandler(BaseHandler):
            user_id, allowed_appservice=service
        )

        token = self.auth_handler().generate_access_token(user_id)
        yield self.store.register(
            user_id=user_id,
            token=token,
            password_hash="",
            appservice_id=service_id,
            create_profile_with_localpart=user.localpart,
        )
        defer.returnValue((user_id, token))
        defer.returnValue(user_id)

    @defer.inlineCallbacks
    def check_recaptcha(self, ip, private_key, challenge, response):
@@ -244,7 +256,7 @@ class RegistrationHandler(BaseHandler):
        user_id = user.to_string()

        yield self.check_user_id_not_appservice_exclusive(user_id)
        token = self.auth_handler().generate_access_token(user_id)
        token = self.macaroon_gen.generate_access_token(user_id)
        try:
            yield self.store.register(
                user_id=user_id,
@@ -294,11 +306,10 @@ class RegistrationHandler(BaseHandler):
            # XXX: This should be a deferred list, shouldn't it?
            yield identity_handler.bind_threepid(c, user_id)

    @defer.inlineCallbacks
    def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
        # valid user IDs must not clash with any user ID namespaces claimed by
        # application services.
        services = yield self.store.get_app_services()
        services = self.store.get_app_services()
        interested_services = [
            s for s in services
            if s.is_interested_in_user(user_id)
@@ -360,7 +371,7 @@ class RegistrationHandler(BaseHandler):
        defer.returnValue(data)

    @defer.inlineCallbacks
    def get_or_create_user(self, localpart, displayname, duration_seconds,
    def get_or_create_user(self, requester, localpart, displayname,
                           password_hash=None):
        """Creates a new user if the user does not exist,
        else revokes all previous access tokens and generates a new one.
@@ -390,8 +401,7 @@ class RegistrationHandler(BaseHandler):

        user = UserID(localpart, self.hs.hostname)
        user_id = user.to_string()
        token = self.auth_handler().generate_short_term_login_token(
            user_id, duration_seconds)
        token = self.macaroon_gen.generate_access_token(user_id)

        if need_register:
            yield self.store.register(
@@ -408,7 +418,7 @@ class RegistrationHandler(BaseHandler):
            logger.info("setting user display name: %s -> %s", user_id, displayname)
            profile_handler = self.hs.get_handlers().profile_handler
            yield profile_handler.set_displayname(
                user, Requester(user, token, False), displayname
                user, requester, displayname, by_admin=True,
            )

        defer.returnValue((user_id, token))

@@ -20,12 +20,10 @@ from ._base import BaseHandler

from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
from synapse.api.constants import (
    EventTypes, JoinRules, RoomCreationPreset, Membership,
    EventTypes, JoinRules, RoomCreationPreset
)
from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.util import stringutils
from synapse.util.async import concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.visibility import filter_events_for_client

from collections import OrderedDict
@@ -36,8 +34,6 @@ import string

logger = logging.getLogger(__name__)

REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000

id_server_scheme = "https://"


@@ -48,16 +44,19 @@ class RoomCreationHandler(BaseHandler):
            "join_rules": JoinRules.INVITE,
            "history_visibility": "shared",
            "original_invitees_have_ops": False,
            "guest_can_join": True,
        },
        RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
            "join_rules": JoinRules.INVITE,
            "history_visibility": "shared",
            "original_invitees_have_ops": True,
            "guest_can_join": True,
        },
        RoomCreationPreset.PUBLIC_CHAT: {
            "join_rules": JoinRules.PUBLIC,
            "history_visibility": "shared",
            "original_invitees_have_ops": False,
            "guest_can_join": False,
        },
    }

@@ -196,6 +195,11 @@ class RoomCreationHandler(BaseHandler):
            },
            ratelimit=False)

        content = {}
        is_direct = config.get("is_direct", None)
        if is_direct:
            content["is_direct"] = is_direct

        for invitee in invite_list:
            yield room_member_handler.update_membership(
                requester,
@@ -203,6 +207,7 @@ class RoomCreationHandler(BaseHandler):
                room_id,
                "invite",
                ratelimit=False,
                content=content,
            )

        for invite_3pid in invite_3pid_list:
@@ -334,6 +339,13 @@ class RoomCreationHandler(BaseHandler):
                content={"history_visibility": config["history_visibility"]}
            )

        if config["guest_can_join"]:
            if (EventTypes.GuestAccess, '') not in initial_state:
                yield send(
                    etype=EventTypes.GuestAccess,
                    content={"guest_access": "can_join"}
                )

        for (etype, state_key), content in initial_state.items():
            yield send(
                etype=etype,
@@ -342,149 +354,6 @@ class RoomCreationHandler(BaseHandler):
            )


class RoomListHandler(BaseHandler):
    def __init__(self, hs):
        super(RoomListHandler, self).__init__(hs)
        self.response_cache = ResponseCache()
        self.remote_list_request_cache = ResponseCache()
        self.remote_list_cache = {}
        self.fetch_looping_call = hs.get_clock().looping_call(
            self.fetch_all_remote_lists, REMOTE_ROOM_LIST_POLL_INTERVAL
        )
        self.fetch_all_remote_lists()

    def get_local_public_room_list(self):
        result = self.response_cache.get(())
        if not result:
            result = self.response_cache.set((), self._get_public_room_list())
        return result

    @defer.inlineCallbacks
    def _get_public_room_list(self):
        room_ids = yield self.store.get_public_room_ids()

        results = []

        @defer.inlineCallbacks
        def handle_room(room_id):
            current_state = yield self.state_handler.get_current_state(room_id)

            # Double check that this is actually a public room.
            join_rules_event = current_state.get((EventTypes.JoinRules, ""))
            if join_rules_event:
                join_rule = join_rules_event.content.get("join_rule", None)
                if join_rule and join_rule != JoinRules.PUBLIC:
                    defer.returnValue(None)

            result = {"room_id": room_id}

            num_joined_users = len([
                1 for _, event in current_state.items()
                if event.type == EventTypes.Member
                and event.membership == Membership.JOIN
            ])
            if num_joined_users == 0:
                return

            result["num_joined_members"] = num_joined_users

            aliases = yield self.store.get_aliases_for_room(room_id)
            if aliases:
                result["aliases"] = aliases

            name_event = yield current_state.get((EventTypes.Name, ""))
            if name_event:
                name = name_event.content.get("name", None)
                if name:
                    result["name"] = name

            topic_event = current_state.get((EventTypes.Topic, ""))
            if topic_event:
                topic = topic_event.content.get("topic", None)
                if topic:
                    result["topic"] = topic

            canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
            if canonical_event:
                canonical_alias = canonical_event.content.get("alias", None)
                if canonical_alias:
                    result["canonical_alias"] = canonical_alias

            visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
            visibility = None
            if visibility_event:
                visibility = visibility_event.content.get("history_visibility", None)
            result["world_readable"] = visibility == "world_readable"

            guest_event = current_state.get((EventTypes.GuestAccess, ""))
            guest = None
            if guest_event:
                guest = guest_event.content.get("guest_access", None)
            result["guest_can_join"] = guest == "can_join"

            avatar_event = current_state.get(("m.room.avatar", ""))
            if avatar_event:
                avatar_url = avatar_event.content.get("url", None)
                if avatar_url:
                    result["avatar_url"] = avatar_url

            results.append(result)

        yield concurrently_execute(handle_room, room_ids, 10)

        # FIXME (erikj): START is no longer a valid value
        defer.returnValue({"start": "START", "end": "END", "chunk": results})

    @defer.inlineCallbacks
    def fetch_all_remote_lists(self):
        deferred = self.hs.get_replication_layer().get_public_rooms(
            self.hs.config.secondary_directory_servers
        )
        self.remote_list_request_cache.set((), deferred)
        self.remote_list_cache = yield deferred

    @defer.inlineCallbacks
    def get_aggregated_public_room_list(self):
        """
        Get the public room list from this server and the servers
        specified in the secondary_directory_servers config option.
        XXX: Pagination...
        """
        # We return the results from our cache which is updated by a looping call,
        # unless we're missing a cache entry, in which case wait for the result
        # of the fetch if there's one in progress. If not, omit that server.
        wait = False
        for s in self.hs.config.secondary_directory_servers:
            if s not in self.remote_list_cache:
                logger.warn("No cached room list from %s: waiting for fetch", s)
                wait = True
                break

        if wait and self.remote_list_request_cache.get(()):
            yield self.remote_list_request_cache.get(())

        public_rooms = yield self.get_local_public_room_list()

        # keep track of which room IDs we've seen so we can de-dup
        room_ids = set()

        # tag all the ones in our list with our server name.
        # Also add them to the de-duping set
        for room in public_rooms['chunk']:
            room["server_name"] = self.hs.hostname
            room_ids.add(room["room_id"])

        # Now add the results from federation
        for server_name, server_result in self.remote_list_cache.items():
            for room in server_result["chunk"]:
                if room["room_id"] not in room_ids:
                    room["server_name"] = server_name
                    public_rooms["chunk"].append(room)
                    room_ids.add(room["room_id"])

        defer.returnValue(public_rooms)


class RoomContextHandler(BaseHandler):
    @defer.inlineCallbacks
    def get_event_context(self, user, room_id, event_id, limit, is_guest):
@@ -568,6 +437,7 @@ class RoomEventSource(object):
        limit,
        room_ids,
        is_guest,
        explicit_room_id=None,
    ):
        # We just ignore the key for now.

@@ -578,7 +448,7 @@ class RoomEventSource(object):
                logger.warn("Stream has topological part!!!! %r", from_key)
                from_key = "s%s" % (from_token.stream,)

        app_service = yield self.store.get_app_service_by_user_id(
        app_service = self.store.get_app_service_by_user_id(
            user.to_string()
        )
        if app_service:
@@ -616,8 +486,11 @@ class RoomEventSource(object):

        defer.returnValue((events, end_key))

    def get_current_key(self, direction='f'):
        return self.store.get_room_events_max_id(direction)
    def get_current_key(self):
        return self.store.get_room_events_max_id()

    def get_current_key_for_room(self, room_id):
        return self.store.get_room_events_max_id(room_id)

    @defer.inlineCallbacks
    def get_pagination_rows(self, user, config, key):

synapse/handlers/room_list.py (new file, 442 lines)
@@ -0,0 +1,442 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from ._base import BaseHandler
|
||||
|
||||
from synapse.api.constants import (
|
||||
EventTypes, JoinRules,
|
||||
)
|
||||
from synapse.util.async import concurrently_execute
|
||||
from synapse.util.caches.response_cache import ResponseCache
|
||||
from synapse.types import ThirdPartyInstanceID
|
||||
|
||||
from collections import namedtuple
|
||||
from unpaddedbase64 import encode_base64, decode_base64
|
||||
|
||||
import logging
|
||||
import msgpack
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
|
||||
|
||||
|
||||
# This is used to indicate we should only return rooms published to the main list.
|
||||
EMTPY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
|
||||
|
||||
|
||||
class RoomListHandler(BaseHandler):
|
||||
def __init__(self, hs):
|
||||
super(RoomListHandler, self).__init__(hs)
|
||||
self.response_cache = ResponseCache(hs)
|
||||
self.remote_response_cache = ResponseCache(hs, timeout_ms=30 * 1000)
|
||||
|
||||
def get_local_public_room_list(self, limit=None, since_token=None,
|
||||
search_filter=None,
|
||||
network_tuple=EMTPY_THIRD_PARTY_ID,):
|
||||
"""Generate a local public room list.
|
||||
|
||||
There are multiple different lists: the main one plus one per third
|
||||
party network. A client can ask for a specific list or to return all.
|
||||
|
||||
Args:
|
||||
limit (int)
|
||||
since_token (str)
|
||||
search_filter (dict)
|
||||
network_tuple (ThirdPartyInstanceID): Which public list to use.
|
||||
This can be (None, None) to indicate the main list, or a particular
|
||||
appservice and network id to use an appservice specific one.
|
||||
Setting to None returns all public rooms across all lists.
|
||||
"""
|
||||
if search_filter:
|
||||
# We explicitly don't bother caching searches or requests for
|
||||
# appservice specific lists.
|
||||
return self._get_public_room_list(
|
||||
limit, since_token, search_filter, network_tuple=network_tuple,
|
||||
)
|
||||
|
||||
key = (limit, since_token, network_tuple)
|
||||
result = self.response_cache.get(key)
|
||||
if not result:
|
||||
result = self.response_cache.set(
|
||||
key,
|
||||
self._get_public_room_list(
|
||||
limit, since_token, network_tuple=network_tuple
|
||||
)
|
||||
)
|
||||
return result
|
||||
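get_local_public_room_list above uses a get-or-set pattern on ResponseCache to deduplicate concurrent requests for the same (limit, since_token, network_tuple) key. A simplified, synchronous sketch of that pattern (a plain dict standing in for ResponseCache, which in Synapse actually caches in-flight Deferreds):

class SimpleResponseCache(object):
    def __init__(self):
        self._pending = {}

    def get(self, key):
        return self._pending.get(key)

    def set(self, key, value):
        self._pending[key] = value
        return value

cache = SimpleResponseCache()

def get_room_list(key, compute):
    # Reuse a previous/in-flight result for this key if there is one.
    result = cache.get(key)
    if not result:
        result = cache.set(key, compute())
    return result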

    @defer.inlineCallbacks
    def _get_public_room_list(self, limit=None, since_token=None,
                              search_filter=None,
                              network_tuple=EMTPY_THIRD_PARTY_ID,):
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
            since_token = None

        rooms_to_order_value = {}
        rooms_to_num_joined = {}
        rooms_to_latest_event_ids = {}

        newly_visible = []
        newly_unpublished = []
        if since_token:
            stream_token = since_token.stream_ordering
            current_public_id = yield self.store.get_current_public_room_stream_id()
            public_room_stream_id = since_token.public_room_stream_id
            newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
                public_room_stream_id, current_public_id,
                network_tuple=network_tuple,
            )
        else:
            stream_token = yield self.store.get_room_max_stream_ordering()
            public_room_stream_id = yield self.store.get_current_public_room_stream_id()

        room_ids = yield self.store.get_public_room_ids_at_stream_id(
            public_room_stream_id, network_tuple=network_tuple,
        )

        # We want to return rooms in a particular order: the number of joined
        # users. We then arbitrarily use the room_id as a tie breaker.

        @defer.inlineCallbacks
        def get_order_for_room(room_id):
            latest_event_ids = rooms_to_latest_event_ids.get(room_id, None)
            if not latest_event_ids:
                latest_event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_token
                )
                rooms_to_latest_event_ids[room_id] = latest_event_ids

            if not latest_event_ids:
                return

            joined_users = yield self.state_handler.get_current_user_in_room(
                room_id, latest_event_ids,
            )
            num_joined_users = len(joined_users)
            rooms_to_num_joined[room_id] = num_joined_users

            if num_joined_users == 0:
                return

            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
        sorted_rooms = [room_id for room_id, _ in sorted_entries]

        # `sorted_rooms` should now be a list of all public room ids that is
        # stable across pagination. Therefore, we can use indices into this
        # list as our pagination tokens.

        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0
        ]

        total_room_count = len(rooms_to_scan)

        if since_token:
            # Filter out rooms we've already returned previously
            # `since_token.current_limit` is the index of the last room we
            # sent down, so we exclude it and everything before/after it.
            if since_token.direction_is_forward:
                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
            else:
                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                rooms_to_scan.reverse()

        # Actually generate the entries. _generate_room_entry will append to
        # chunk but will stop if len(chunk) > limit
        chunk = []
        if limit and not search_filter:
            step = limit + 1
            for i in xrange(0, len(rooms_to_scan), step):
                # We iterate here because the vast majority of cases we'll stop
                # at first iteration, but occasionally _generate_room_entry
                # won't append to the chunk and so we need to loop again.
                # We don't want to scan over the entire range either as that
                # would potentially waste a lot of work.
                yield concurrently_execute(
                    lambda r: self._generate_room_entry(
                        r, rooms_to_num_joined[r],
                        chunk, limit, search_filter
                    ),
                    rooms_to_scan[i:i + step], 10
                )
                if len(chunk) >= limit + 1:
                    break
        else:
            yield concurrently_execute(
                lambda r: self._generate_room_entry(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter
                ),
                rooms_to_scan, 5
            )

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

        # Work out the new limit of the batch for pagination, or None if we
        # know there are no more results that would be returned.
        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
        # we've returned (or the reverse if we paginated backwards)
        # We tried to pull out limit + 1 rooms above, so if we have <= limit
        # then we know there are no more results to return
        new_limit = None
        if chunk and (not limit or len(chunk) > limit):

            if not since_token or since_token.direction_is_forward:
                if limit:
                    chunk = chunk[:limit]
                last_room_id = chunk[-1]["room_id"]
            else:
                if limit:
                    chunk = chunk[-limit:]
                last_room_id = chunk[0]["room_id"]

            new_limit = sorted_rooms.index(last_room_id)

        results = {
            "chunk": chunk,
            "total_room_count_estimate": total_room_count,
        }

        if since_token:
            results["new_rooms"] = bool(newly_visible)

        if not since_token or since_token.direction_is_forward:
            if new_limit is not None:
                results["next_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=True,
                ).to_token()

            if since_token:
                results["prev_batch"] = since_token.copy_and_replace(
                    direction_is_forward=False,
                    current_limit=since_token.current_limit + 1,
                ).to_token()
        else:
            if new_limit is not None:
                results["prev_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=False,
                ).to_token()

            if since_token:
                results["next_batch"] = since_token.copy_and_replace(
                    direction_is_forward=True,
                    current_limit=since_token.current_limit - 1,
                ).to_token()

        defer.returnValue(results)

    @defer.inlineCallbacks
    def _generate_room_entry(self, room_id, num_joined_users, chunk, limit,
                             search_filter):
        if limit and len(chunk) > limit + 1:
            # We've already got enough, so lets just drop it.
            return

        result = {
            "room_id": room_id,
            "num_joined_members": num_joined_users,
        }

        current_state_ids = yield self.state_handler.get_current_state_ids(room_id)

        event_map = yield self.store.get_events([
            event_id for key, event_id in current_state_ids.items()
            if key[0] in (
                EventTypes.JoinRules,
                EventTypes.Name,
                EventTypes.Topic,
                EventTypes.CanonicalAlias,
                EventTypes.RoomHistoryVisibility,
                EventTypes.GuestAccess,
                "m.room.avatar",
            )
        ])

        current_state = {
            (ev.type, ev.state_key): ev
            for ev in event_map.values()
        }

        # Double check that this is actually a public room.
        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            join_rule = join_rules_event.content.get("join_rule", None)
            if join_rule and join_rule != JoinRules.PUBLIC:
                defer.returnValue(None)

        aliases = yield self.store.get_aliases_for_room(room_id)
        if aliases:
            result["aliases"] = aliases

        name_event = yield current_state.get((EventTypes.Name, ""))
        if name_event:
            name = name_event.content.get("name", None)
            if name:
                result["name"] = name

        topic_event = current_state.get((EventTypes.Topic, ""))
        if topic_event:
            topic = topic_event.content.get("topic", None)
            if topic:
                result["topic"] = topic

        canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_event:
            canonical_alias = canonical_event.content.get("alias", None)
            if canonical_alias:
                result["canonical_alias"] = canonical_alias

        visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
        visibility = None
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility", None)
        result["world_readable"] = visibility == "world_readable"

        guest_event = current_state.get((EventTypes.GuestAccess, ""))
        guest = None
        if guest_event:
            guest = guest_event.content.get("guest_access", None)
        result["guest_can_join"] = guest == "can_join"

        avatar_event = current_state.get(("m.room.avatar", ""))
        if avatar_event:
            avatar_url = avatar_event.content.get("url", None)
            if avatar_url:
                result["avatar_url"] = avatar_url

        if _matches_room_entry(result, search_filter):
            chunk.append(result)

    @defer.inlineCallbacks
    def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
                                    search_filter=None, include_all_networks=False,
                                    third_party_instance_id=None,):
        if search_filter:
            # We currently don't support searching across federation, so we have
            # to do it manually without pagination
            limit = None
            since_token = None

        res = yield self._get_remote_list_cached(
            server_name, limit=limit, since_token=since_token,
            include_all_networks=include_all_networks,
            third_party_instance_id=third_party_instance_id,
        )

        if search_filter:
            res = {"chunk": [
                entry
                for entry in list(res.get("chunk", []))
                if _matches_room_entry(entry, search_filter)
            ]}

        defer.returnValue(res)

    def _get_remote_list_cached(self, server_name, limit=None, since_token=None,
                                search_filter=None, include_all_networks=False,
                                third_party_instance_id=None,):
        repl_layer = self.hs.get_replication_layer()
        if search_filter:
            # We can't cache when asking for search
            return repl_layer.get_public_rooms(
                server_name, limit=limit, since_token=since_token,
                search_filter=search_filter, include_all_networks=include_all_networks,
                third_party_instance_id=third_party_instance_id,
            )

        key = (
            server_name, limit, since_token, include_all_networks,
            third_party_instance_id,
        )
        result = self.remote_response_cache.get(key)
        if not result:
            result = self.remote_response_cache.set(
                key,
                repl_layer.get_public_rooms(
                    server_name, limit=limit, since_token=since_token,
                    search_filter=search_filter,
                    include_all_networks=include_all_networks,
                    third_party_instance_id=third_party_instance_id,
                )
            )
        return result


class RoomListNextBatch(namedtuple("RoomListNextBatch", (
    "stream_ordering",  # stream_ordering of the first public room list
    "public_room_stream_id",  # public room stream id for first public room list
    "current_limit",  # The number of previous rooms returned
    "direction_is_forward",  # Bool if this is a next_batch, false if prev_batch
))):

    KEY_DICT = {
        "stream_ordering": "s",
        "public_room_stream_id": "p",
        "current_limit": "n",
        "direction_is_forward": "d",
    }

    REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()}

    @classmethod
    def from_token(cls, token):
        return RoomListNextBatch(**{
            cls.REVERSE_KEY_DICT[key]: val
            for key, val in msgpack.loads(decode_base64(token)).items()
        })

    def to_token(self):
        return encode_base64(msgpack.dumps({
            self.KEY_DICT[key]: val
            for key, val in self._asdict().items()
        }))

    def copy_and_replace(self, **kwds):
        return self._replace(
            **kwds
        )
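The pagination token is therefore just the batch fields packed with msgpack under shortened keys and encoded as unpadded base64. It round-trips like this (assuming the msgpack and unpaddedbase64 packages the file already imports; on newer msgpack/Python 3 you may need raw=False to get string keys back):

import msgpack
from unpaddedbase64 import encode_base64, decode_base64

fields = {"s": 12345, "p": 67, "n": 20, "d": True}
token = encode_base64(msgpack.dumps(fields))

decoded = msgpack.loads(decode_base64(token), raw=False)
assert decoded == fields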
|
||||
|
||||
def _matches_room_entry(room_entry, search_filter):
|
||||
if search_filter and search_filter.get("generic_search_term", None):
|
||||
generic_search_term = search_filter["generic_search_term"].upper()
|
||||
if generic_search_term in room_entry.get("name", "").upper():
|
||||
return True
|
||||
elif generic_search_term in room_entry.get("topic", "").upper():
|
||||
return True
|
||||
elif generic_search_term in room_entry.get("canonical_alias", "").upper():
|
||||
return True
|
||||
else:
|
||||
return True
|
||||
|
||||
return False
|
||||
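RoomListNextBatch serialises itself into a compact, URL-safe pagination token: field names are shortened to single characters, packed with msgpack, then encoded with unpadded base64. A rough round-trip sketch of the same scheme (standalone; on Python 3 you would pass raw=False to msgpack.loads to get str keys back):

import msgpack
from unpaddedbase64 import decode_base64, encode_base64

KEY_DICT = {"stream_ordering": "s", "current_limit": "n"}
REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()}

def to_token(fields):
    # Shorten the field names before packing to keep the token small.
    return encode_base64(msgpack.dumps({KEY_DICT[k]: v for k, v in fields.items()}))

def from_token(token):
    return {
        REVERSE_KEY_DICT[k]: v
        for k, v in msgpack.loads(decode_base64(token)).items()
    }

token = to_token({"stream_ordering": 1234, "current_limit": 20})
assert from_token(token) == {"stream_ordering": 1234, "current_limit": 20}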
@@ -14,24 +14,22 @@
# limitations under the License.


import logging

from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from twisted.internet import defer
from unpaddedbase64 import decode_base64

from ._base import BaseHandler

from synapse.types import UserID, RoomID, Requester
import synapse.types
from synapse.api.constants import (
    EventTypes, Membership,
)
from synapse.api.errors import AuthError, SynapseError, Codes
from synapse.types import UserID, RoomID
from synapse.util.async import Linearizer
from synapse.util.distributor import user_left_room, user_joined_room

from signedjson.sign import verify_signed_json
from signedjson.key import decode_verify_key_bytes

from unpaddedbase64 import decode_base64

import logging
from ._base import BaseHandler

logger = logging.getLogger(__name__)

@@ -47,7 +45,7 @@ class RoomMemberHandler(BaseHandler):
    def __init__(self, hs):
        super(RoomMemberHandler, self).__init__(hs)

        self.member_linearizer = Linearizer()
        self.member_linearizer = Linearizer(name="member")

        self.clock = hs.get_clock()

@@ -61,10 +59,13 @@ class RoomMemberHandler(BaseHandler):
        prev_event_ids,
        txn_id=None,
        ratelimit=True,
        content=None,
    ):
        if content is None:
            content = {}
        msg_handler = self.hs.get_handlers().message_handler

        content = {"membership": membership}
        content["membership"] = membership
        if requester.is_guest:
            content["kind"] = "guest"

@@ -84,6 +85,12 @@ class RoomMemberHandler(BaseHandler):
            prev_event_ids=prev_event_ids,
        )

        # Check if this event matches the previous membership event for the user.
        duplicate = yield msg_handler.deduplicate_state_event(event, context)
        if duplicate is not None:
            # Discard the new event since this membership change is a no-op.
            defer.returnValue(duplicate)

        yield msg_handler.handle_new_client_event(
            requester,
            event,
@@ -92,20 +99,28 @@ class RoomMemberHandler(BaseHandler):
            ratelimit=ratelimit,
        )

        prev_member_event = context.current_state.get(
        prev_member_event_id = context.prev_state_ids.get(
            (EventTypes.Member, target.to_string()),
            None
        )

        if event.membership == Membership.JOIN:
            if not prev_member_event or prev_member_event.membership != Membership.JOIN:
                # Only fire user_joined_room if the user has actually joined the
                # room. Don't bother if the user is just changing their profile
                # info.
            # Only fire user_joined_room if the user has actually joined the
            # room. Don't bother if the user is just changing their profile
            # info.
            newly_joined = True
            if prev_member_event_id:
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                newly_joined = prev_member_event.membership != Membership.JOIN
            if newly_joined:
                yield user_joined_room(self.distributor, target, room_id)
        elif event.membership == Membership.LEAVE:
            if prev_member_event and prev_member_event.membership == Membership.JOIN:
                user_left_room(self.distributor, target, room_id)
            if prev_member_event_id:
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                if prev_member_event.membership == Membership.JOIN:
                    user_left_room(self.distributor, target, room_id)

        defer.returnValue(event)

    @defer.inlineCallbacks
    def remote_join(self, remote_room_hosts, room_id, user, content):
@@ -142,8 +157,9 @@ class RoomMemberHandler(BaseHandler):
            remote_room_hosts=None,
            third_party_signed=None,
            ratelimit=True,
            content=None,
    ):
        key = (target, room_id,)
        key = (room_id,)

        with (yield self.member_linearizer.queue(key)):
            result = yield self._update_membership(
@@ -155,6 +171,7 @@ class RoomMemberHandler(BaseHandler):
                remote_room_hosts=remote_room_hosts,
                third_party_signed=third_party_signed,
                ratelimit=ratelimit,
                content=content,
            )

        defer.returnValue(result)
@@ -170,7 +187,12 @@ class RoomMemberHandler(BaseHandler):
            remote_room_hosts=None,
            third_party_signed=None,
            ratelimit=True,
            content=None,
    ):
        content_specified = bool(content)
        if content is None:
            content = {}

        effective_membership_state = action
        if action in ["kick", "unban"]:
            effective_membership_state = "leave"
@@ -188,43 +210,56 @@ class RoomMemberHandler(BaseHandler):
            remote_room_hosts = []

        latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
        current_state = yield self.state_handler.get_current_state(
        current_state_ids = yield self.state_handler.get_current_state_ids(
            room_id, latest_event_ids=latest_event_ids,
        )

        old_state = current_state.get((EventTypes.Member, target.to_string()))
        old_membership = old_state.content.get("membership") if old_state else None
        if action == "unban" and old_membership != "ban":
            raise SynapseError(
                403,
                "Cannot unban user who was not banned (membership=%s)" % old_membership,
                errcode=Codes.BAD_STATE
            )
        if old_membership == "ban" and action != "unban":
            raise SynapseError(
                403,
                "Cannot %s user who was banned" % (action,),
                errcode=Codes.BAD_STATE
            )
        old_state_id = current_state_ids.get((EventTypes.Member, target.to_string()))
        if old_state_id:
            old_state = yield self.store.get_event(old_state_id, allow_none=True)
            old_membership = old_state.content.get("membership") if old_state else None
            if action == "unban" and old_membership != "ban":
                raise SynapseError(
                    403,
                    "Cannot unban user who was not banned"
                    " (membership=%s)" % old_membership,
                    errcode=Codes.BAD_STATE
                )
            if old_membership == "ban" and action != "unban":
                raise SynapseError(
                    403,
                    "Cannot %s user who was banned" % (action,),
                    errcode=Codes.BAD_STATE
                )

        is_host_in_room = self.is_host_in_room(current_state)
            if old_state:
                same_content = content == old_state.content
                same_membership = old_membership == effective_membership_state
                same_sender = requester.user.to_string() == old_state.sender
                if same_sender and same_membership and same_content:
                    defer.returnValue(old_state)

        is_host_in_room = yield self._is_host_in_room(current_state_ids)

        if effective_membership_state == Membership.JOIN:
            if requester.is_guest and not self._can_guest_join(current_state):
                # This should be an auth check, but guests are a local concept,
                # so don't really fit into the general auth process.
                raise AuthError(403, "Guest access not allowed")
            if requester.is_guest:
                guest_can_join = yield self._can_guest_join(current_state_ids)
                if not guest_can_join:
                    # This should be an auth check, but guests are a local concept,
                    # so don't really fit into the general auth process.
                    raise AuthError(403, "Guest access not allowed")

            if not is_host_in_room:
                inviter = yield self.get_inviter(target.to_string(), room_id)
                if inviter and not self.hs.is_mine(inviter):
                    remote_room_hosts.append(inviter.domain)

                content = {"membership": Membership.JOIN}
                content["membership"] = Membership.JOIN

                profile = self.hs.get_handlers().profile_handler
                content["displayname"] = yield profile.get_displayname(target)
                content["avatar_url"] = yield profile.get_avatar_url(target)
                if not content_specified:
                    content["displayname"] = yield profile.get_displayname(target)
                    content["avatar_url"] = yield profile.get_avatar_url(target)

                if requester.is_guest:
                    content["kind"] = "guest"
@@ -266,7 +301,7 @@ class RoomMemberHandler(BaseHandler):

            defer.returnValue({})

        yield self._local_membership_update(
        res = yield self._local_membership_update(
            requester=requester,
            target=target,
            room_id=room_id,
@@ -274,7 +309,9 @@ class RoomMemberHandler(BaseHandler):
            txn_id=txn_id,
            ratelimit=ratelimit,
            prev_event_ids=latest_event_ids,
            content=content,
        )
        defer.returnValue(res)

    @defer.inlineCallbacks
    def send_membership_event(
@@ -315,18 +352,20 @@ class RoomMemberHandler(BaseHandler):
            )
            assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
        else:
            requester = Requester(target_user, None, False)
            requester = synapse.types.create_requester(target_user)

        message_handler = self.hs.get_handlers().message_handler
        prev_event = message_handler.deduplicate_state_event(event, context)
        prev_event = yield message_handler.deduplicate_state_event(event, context)
        if prev_event is not None:
            return

        if event.membership == Membership.JOIN:
            if requester.is_guest and not self._can_guest_join(context.current_state):
                # This should be an auth check, but guests are a local concept,
                # so don't really fit into the general auth process.
                raise AuthError(403, "Guest access not allowed")
            if requester.is_guest:
                guest_can_join = yield self._can_guest_join(context.prev_state_ids)
                if not guest_can_join:
                    # This should be an auth check, but guests are a local concept,
                    # so don't really fit into the general auth process.
                    raise AuthError(403, "Guest access not allowed")

        yield message_handler.handle_new_client_event(
            requester,
@@ -336,27 +375,39 @@ class RoomMemberHandler(BaseHandler):
            ratelimit=ratelimit,
        )

        prev_member_event = context.current_state.get(
            (EventTypes.Member, target_user.to_string()),
        prev_member_event_id = context.prev_state_ids.get(
            (EventTypes.Member, event.state_key),
            None
        )

        if event.membership == Membership.JOIN:
            if not prev_member_event or prev_member_event.membership != Membership.JOIN:
                # Only fire user_joined_room if the user has actually joined the
                # room. Don't bother if the user is just changing their profile
                # info.
            # Only fire user_joined_room if the user has actually joined the
            # room. Don't bother if the user is just changing their profile
            # info.
            newly_joined = True
            if prev_member_event_id:
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                newly_joined = prev_member_event.membership != Membership.JOIN
            if newly_joined:
                yield user_joined_room(self.distributor, target_user, room_id)
        elif event.membership == Membership.LEAVE:
            if prev_member_event and prev_member_event.membership == Membership.JOIN:
                user_left_room(self.distributor, target_user, room_id)
            if prev_member_event_id:
                prev_member_event = yield self.store.get_event(prev_member_event_id)
                if prev_member_event.membership == Membership.JOIN:
                    user_left_room(self.distributor, target_user, room_id)

    def _can_guest_join(self, current_state):
    @defer.inlineCallbacks
    def _can_guest_join(self, current_state_ids):
        """
        Returns whether a guest can join a room based on its current state.
        """
        guest_access = current_state.get((EventTypes.GuestAccess, ""), None)
        return (
        guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None)
        if not guest_access_id:
            defer.returnValue(False)

        guest_access = yield self.store.get_event(guest_access_id)

        defer.returnValue(
            guest_access
            and guest_access.content
            and "guest_access" in guest_access.content
@@ -675,3 +726,24 @@ class RoomMemberHandler(BaseHandler):

        if membership:
            yield self.store.forget(user_id, room_id)

    @defer.inlineCallbacks
    def _is_host_in_room(self, current_state_ids):
        # Have we just created the room, and is this about to be the very
        # first member event?
        create_event_id = current_state_ids.get(("m.room.create", ""))
        if len(current_state_ids) == 1 and create_event_id:
            defer.returnValue(self.hs.is_mine_id(create_event_id))

        for (etype, state_key), event_id in current_state_ids.items():
            if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
                continue

            event = yield self.store.get_event(event_id, allow_none=True)
            if not event:
                continue

            if event.membership == Membership.JOIN:
                defer.returnValue(True)

        defer.returnValue(False)

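_is_host_in_room can short-circuit on a freshly created room because identifiers of this era are domain-qualified: both the state_key of a member event (a user ID) and an event ID carry the originating server after the colon, which is what is_mine_id inspects. A simplified illustration, mirroring synapse.types.get_domain_from_id (the example IDs are hypothetical):

def get_domain_from_id(string):
    # "@alice:example.org" or "$1477059024abcde:example.org" -> "example.org"
    return string.split(":", 1)[1]

def is_mine_id(string, server_name="example.org"):
    return get_domain_from_id(string) == server_name

assert is_mine_id("$1477059024abcde:example.org")
assert not is_mine_id("@bob:elsewhere.net")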
@@ -35,6 +35,7 @@ SyncConfig = collections.namedtuple("SyncConfig", [
    "filter_collection",
    "is_guest",
    "request_key",
    "device_id",
])

@@ -113,6 +114,8 @@ class SyncResult(collections.namedtuple("SyncResult", [
    "joined",  # JoinedSyncResult for each joined room.
    "invited",  # InvitedSyncResult for each invited room.
    "archived",  # ArchivedSyncResult for each archived room.
    "to_device",  # List of direct messages for the device.
    "device_lists",  # List of user_ids whose devices have changed
])):
    __slots__ = []

@@ -126,7 +129,9 @@ class SyncResult(collections.namedtuple("SyncResult", [
            self.joined or
            self.invited or
            self.archived or
            self.account_data
            self.account_data or
            self.to_device or
            self.device_lists
        )

@@ -138,7 +143,8 @@ class SyncHandler(object):
        self.presence_handler = hs.get_presence_handler()
        self.event_sources = hs.get_event_sources()
        self.clock = hs.get_clock()
        self.response_cache = ResponseCache()
        self.response_cache = ResponseCache(hs)
        self.state = hs.get_state_handler()

    def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
                               full_state=False):
@@ -273,6 +279,7 @@ class SyncHandler(object):
        """
        with Measure(self.clock, "load_filtered_recents"):
            timeline_limit = sync_config.filter_collection.timeline_limit()
            block_all_timeline = sync_config.filter_collection.blocks_all_room_timeline()

            if recents is None or newly_joined_room or timeline_limit < len(recents):
                limited = True
@@ -289,7 +296,7 @@ class SyncHandler(object):
            else:
                recents = []

            if not limited:
            if not limited or block_all_timeline:
                defer.returnValue(TimelineBatch(
                    events=recents,
                    prev_batch=now_token,
@@ -355,11 +362,11 @@ class SyncHandler(object):
        Returns:
            A Deferred map from ((type, state_key)->Event)
        """
        state = yield self.store.get_state_for_event(event.event_id)
        state_ids = yield self.store.get_state_ids_for_event(event.event_id)
        if event.is_state():
            state = state.copy()
            state[(event.type, event.state_key)] = event
            defer.returnValue(state)
            state_ids = state_ids.copy()
            state_ids[(event.type, event.state_key)] = event.event_id
        defer.returnValue(state_ids)

    @defer.inlineCallbacks
    def get_state_at(self, room_id, stream_position):
@@ -412,62 +419,66 @@ class SyncHandler(object):
        with Measure(self.clock, "compute_state_delta"):
            if full_state:
                if batch:
                    current_state = yield self.store.get_state_for_event(
                    current_state_ids = yield self.store.get_state_ids_for_event(
                        batch.events[-1].event_id
                    )

                    state = yield self.store.get_state_for_event(
                    state_ids = yield self.store.get_state_ids_for_event(
                        batch.events[0].event_id
                    )
                else:
                    current_state = yield self.get_state_at(
                    current_state_ids = yield self.get_state_at(
                        room_id, stream_position=now_token
                    )

                    state = current_state
                    state_ids = current_state_ids

                timeline_state = {
                    (event.type, event.state_key): event
                    (event.type, event.state_key): event.event_id
                    for event in batch.events if event.is_state()
                }

                state = _calculate_state(
                state_ids = _calculate_state(
                    timeline_contains=timeline_state,
                    timeline_start=state,
                    timeline_start=state_ids,
                    previous={},
                    current=current_state,
                    current=current_state_ids,
                )
            elif batch.limited:
                state_at_previous_sync = yield self.get_state_at(
                    room_id, stream_position=since_token
                )

                current_state = yield self.store.get_state_for_event(
                current_state_ids = yield self.store.get_state_ids_for_event(
                    batch.events[-1].event_id
                )

                state_at_timeline_start = yield self.store.get_state_for_event(
                state_at_timeline_start = yield self.store.get_state_ids_for_event(
                    batch.events[0].event_id
                )

                timeline_state = {
                    (event.type, event.state_key): event
                    (event.type, event.state_key): event.event_id
                    for event in batch.events if event.is_state()
                }

                state = _calculate_state(
                state_ids = _calculate_state(
                    timeline_contains=timeline_state,
                    timeline_start=state_at_timeline_start,
                    previous=state_at_previous_sync,
                    current=current_state,
                    current=current_state_ids,
                )
            else:
                state = {}
                state_ids = {}

            defer.returnValue({
                (e.type, e.state_key): e
                for e in sync_config.filter_collection.filter_room_state(state.values())
            })
            state = {}
            if state_ids:
                state = yield self.store.get_events(state_ids.values())

            defer.returnValue({
                (e.type, e.state_key): e
                for e in sync_config.filter_collection.filter_room_state(state.values())
            })

    @defer.inlineCallbacks
    def unread_notifs_for_room_id(self, room_id, sync_config):
@@ -485,9 +496,9 @@ class SyncHandler(object):
                )
                defer.returnValue(notifs)

        # There is no new information in this period, so your notification
        # count is whatever it was last time.
        defer.returnValue(None)
            # There is no new information in this period, so your notification
            # count is whatever it was last time.
            defer.returnValue(None)

    @defer.inlineCallbacks
    def generate_sync_result(self, sync_config, since_token=None, full_state=False):
@@ -501,6 +512,7 @@ class SyncHandler(object):
        Returns:
            Deferred(SyncResult)
        """
        logger.info("Calculating sync response for %r", sync_config.user)

        # NB: The now_token gets changed by some of the generate_sync_* methods,
        # this is due to some of the underlying streams not supporting the ability
@@ -523,8 +535,19 @@ class SyncHandler(object):
        )
        newly_joined_rooms, newly_joined_users = res

        yield self._generate_sync_entry_for_presence(
            sync_result_builder, newly_joined_rooms, newly_joined_users
        block_all_presence_data = (
            since_token is None and
            sync_config.filter_collection.blocks_all_presence()
        )
        if not block_all_presence_data:
            yield self._generate_sync_entry_for_presence(
                sync_result_builder, newly_joined_rooms, newly_joined_users
            )

        yield self._generate_sync_entry_for_to_device(sync_result_builder)

        device_lists = yield self._generate_sync_entry_for_device_list(
            sync_result_builder
        )

        defer.returnValue(SyncResult(
@@ -533,9 +556,76 @@ class SyncHandler(object):
            joined=sync_result_builder.joined,
            invited=sync_result_builder.invited,
            archived=sync_result_builder.archived,
            to_device=sync_result_builder.to_device,
            device_lists=device_lists,
            next_batch=sync_result_builder.now_token,
        ))

    @defer.inlineCallbacks
    def _generate_sync_entry_for_device_list(self, sync_result_builder):
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token

        if since_token and since_token.device_list_key:
            rooms = yield self.store.get_rooms_for_user(user_id)
            room_ids = set(r.room_id for r in rooms)

            user_ids_changed = set()
            changed = yield self.store.get_user_whose_devices_changed(
                since_token.device_list_key
            )
            for other_user_id in changed:
                other_rooms = yield self.store.get_rooms_for_user(other_user_id)
                if room_ids.intersection(e.room_id for e in other_rooms):
                    user_ids_changed.add(other_user_id)

            defer.returnValue(user_ids_changed)
        else:
            defer.returnValue([])

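_generate_sync_entry_for_device_list only reports a user whose devices changed if the syncing user still shares a room with them, which the room-ID intersection above implements. Reduced to its core, the test is a set intersection (the room IDs here are made up):

my_room_ids = {"!a:hs", "!b:hs"}

def shares_room_with_me(other_room_ids):
    # True if any of the other user's rooms is also one of ours.
    return bool(my_room_ids.intersection(other_room_ids))

assert shares_room_with_me({"!b:hs", "!c:hs"})
assert not shares_room_with_me({"!z:hs"})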
    @defer.inlineCallbacks
    def _generate_sync_entry_for_to_device(self, sync_result_builder):
        """Generates the to-device portion of the sync response. Populates
        `sync_result_builder` with the result.

        Args:
            sync_result_builder(SyncResultBuilder)

        Returns:
            Deferred(None)
        """
        user_id = sync_result_builder.sync_config.user.to_string()
        device_id = sync_result_builder.sync_config.device_id
        now_token = sync_result_builder.now_token
        since_stream_id = 0
        if sync_result_builder.since_token is not None:
            since_stream_id = int(sync_result_builder.since_token.to_device_key)

        if since_stream_id != int(now_token.to_device_key):
            # We only delete messages when a new message comes in, but that's
            # fine so long as we delete them at some point.

            deleted = yield self.store.delete_messages_for_device(
                user_id, device_id, since_stream_id
            )
            logger.info("Deleted %d to-device messages up to %d",
                        deleted, since_stream_id)

            messages, stream_id = yield self.store.get_new_messages_for_device(
                user_id, device_id, since_stream_id, now_token.to_device_key
            )

            logger.info(
                "Returning %d to-device messages between %d and %d (current token: %d)",
                len(messages), since_stream_id, stream_id, now_token.to_device_key
            )
            sync_result_builder.now_token = now_token.copy_and_replace(
                "to_device_key", stream_id
            )
            sync_result_builder.to_device = messages
        else:
            sync_result_builder.to_device = []

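The to-device branch piggybacks cleanup on progress: once the client's since token has advanced, everything at or below it can never be requested again, so it is deleted before the next batch is read. A schematic of that delete-then-fetch flow, with a hypothetical in-memory store standing in for the real datastore:

class ToDeviceStore(object):
    def __init__(self):
        self.messages = []  # list of (stream_id, payload), ascending

    def delete_messages_for_device(self, up_to_stream_id):
        # The client has acknowledged everything up to this position.
        before = len(self.messages)
        self.messages = [m for m in self.messages if m[0] > up_to_stream_id]
        return before - len(self.messages)

    def get_new_messages_for_device(self, since_stream_id, to_stream_id):
        batch = [
            m for m in self.messages
            if since_stream_id < m[0] <= to_stream_id
        ]
        last = batch[-1][0] if batch else since_stream_id
        return batch, last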
    @defer.inlineCallbacks
    def _generate_sync_entry_for_account_data(self, sync_result_builder):
        """Generates the account data portion of the sync response. Populates
@@ -626,7 +716,7 @@ class SyncHandler(object):

        extra_users_ids = set(newly_joined_users)
        for room_id in newly_joined_rooms:
            users = yield self.store.get_users_in_room(room_id)
            users = yield self.state.get_current_user_in_room(room_id)
            extra_users_ids.update(users)
        extra_users_ids.discard(user.to_string())

@@ -659,13 +749,20 @@ class SyncHandler(object):
            `(newly_joined_rooms, newly_joined_users)`
        """
        user_id = sync_result_builder.sync_config.user.to_string()

        now_token, ephemeral_by_room = yield self.ephemeral_by_room(
            sync_result_builder.sync_config,
            now_token=sync_result_builder.now_token,
            since_token=sync_result_builder.since_token,
        block_all_room_ephemeral = (
            sync_result_builder.since_token is None and
            sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral()
        )
        sync_result_builder.now_token = now_token

        if block_all_room_ephemeral:
            ephemeral_by_room = {}
        else:
            now_token, ephemeral_by_room = yield self.ephemeral_by_room(
                sync_result_builder.sync_config,
                now_token=sync_result_builder.now_token,
                since_token=sync_result_builder.since_token,
            )
            sync_result_builder.now_token = now_token

        ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
            "m.ignored_user_list", user_id=user_id,
@@ -738,7 +835,7 @@ class SyncHandler(object):

        assert since_token

        app_service = yield self.store.get_app_service_by_user_id(user_id)
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service:
            rooms = yield self.store.get_app_service_rooms(app_service)
            joined_room_ids = set(r.room_id for r in rooms)
@@ -766,8 +863,13 @@ class SyncHandler(object):
            # the last sync (even if we have since left). This is to make sure
            # we do send down the room, and with full state, where necessary
            if room_id in joined_room_ids or has_join:
                old_state = yield self.get_state_at(room_id, since_token)
                old_mem_ev = old_state.get((EventTypes.Member, user_id), None)
                old_state_ids = yield self.get_state_at(room_id, since_token)
                old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None)
                old_mem_ev = None
                if old_mem_ev_id:
                    old_mem_ev = yield self.store.get_event(
                        old_mem_ev_id, allow_none=True
                    )
                if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
                    newly_joined_rooms.append(room_id)

@@ -1059,27 +1161,25 @@ def _calculate_state(timeline_contains, timeline_start, previous, current):
    Returns:
        dict
    """
    event_id_to_state = {
        e.event_id: e
        for e in itertools.chain(
            timeline_contains.values(),
            previous.values(),
            timeline_start.values(),
            current.values(),
    event_id_to_key = {
        e: key
        for key, e in itertools.chain(
            timeline_contains.items(),
            previous.items(),
            timeline_start.items(),
            current.items(),
        )
    }

    c_ids = set(e.event_id for e in current.values())
    tc_ids = set(e.event_id for e in timeline_contains.values())
    p_ids = set(e.event_id for e in previous.values())
    ts_ids = set(e.event_id for e in timeline_start.values())
    c_ids = set(e for e in current.values())
    tc_ids = set(e for e in timeline_contains.values())
    p_ids = set(e for e in previous.values())
    ts_ids = set(e for e in timeline_start.values())

    state_ids = ((c_ids | ts_ids) - p_ids) - tc_ids

    evs = (event_id_to_state[e] for e in state_ids)
    return {
        (e.type, e.state_key): e
        for e in evs
        event_id_to_key[e]: e for e in state_ids
    }

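The rewritten _calculate_state works entirely on event IDs: what goes down to the client is anything in the current state or the state at the timeline's start that was not already sent last time (previous) and is not in the timeline itself. A tiny worked example of that set expression, with made-up event IDs:

# (type, state_key) -> event_id maps, as in the function above.
current = {("m.room.name", ""): "$name2", ("m.room.topic", ""): "$topic1"}
timeline_start = {("m.room.name", ""): "$name2"}
previous = {("m.room.topic", ""): "$topic1"}  # already sent last sync
timeline_contains = {}  # no state events in this timeline batch

c_ids = set(current.values())
ts_ids = set(timeline_start.values())
p_ids = set(previous.values())
tc_ids = set(timeline_contains.values())

# Only the new room name needs sending; the topic went down last sync.
assert ((c_ids | ts_ids) - p_ids) - tc_ids == {"$name2"}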
@@ -1103,6 +1203,7 @@ class SyncResultBuilder(object):
        self.joined = []
        self.invited = []
        self.archived = []
        self.device = []


class RoomSyncResultBuilder(object):

@@ -16,9 +16,10 @@
from twisted.internet import defer

from synapse.api.errors import SynapseError, AuthError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logcontext import preserve_fn
from synapse.util.metrics import Measure
from synapse.types import UserID
from synapse.util.wheel_timer import WheelTimer
from synapse.types import UserID, get_domain_from_id

import logging

@@ -33,6 +34,13 @@ logger = logging.getLogger(__name__)
RoomMember = namedtuple("RoomMember", ("room_id", "user_id"))


# How long we keep remote typing notifications before we expect the remote
# server to have resent them.
FEDERATION_TIMEOUT = 60 * 1000

# How often to resend typing across federation.
FEDERATION_PING_INTERVAL = 40 * 1000


class TypingHandler(object):
    def __init__(self, hs):
        self.store = hs.get_datastore()
@@ -40,17 +48,21 @@ class TypingHandler(object):
        self.auth = hs.get_auth()
        self.is_mine_id = hs.is_mine_id
        self.notifier = hs.get_notifier()
        self.state = hs.get_state_handler()

        self.hs = hs

        self.clock = hs.get_clock()
        self.wheel_timer = WheelTimer(bucket_size=5000)

        self.federation = hs.get_replication_layer()
        self.federation = hs.get_federation_sender()

        self.federation.register_edu_handler("m.typing", self._recv_edu)
        hs.get_replication_layer().register_edu_handler("m.typing", self._recv_edu)

        hs.get_distributor().observe("user_left_room", self.user_left_room)

        self._member_typing_until = {}  # clock time we expect to stop
        self._member_typing_timer = {}  # deferreds to manage the above
        self._member_last_federation_poke = {}

        # map room IDs to serial numbers
        self._room_serials = {}
@@ -58,12 +70,49 @@ class TypingHandler(object):
        # map room IDs to sets of users currently typing
        self._room_typing = {}

    def tearDown(self):
        """Cancels all the pending timers.
        Normally this shouldn't be needed, but it's required from unit tests
        to avoid a "Reactor was unclean" warning."""
        for t in self._member_typing_timer.values():
            self.clock.cancel_call_later(t)
        self.clock.looping_call(
            self._handle_timeouts,
            5000,
        )

    def _handle_timeouts(self):
        logger.info("Checking for typing timeouts")

        now = self.clock.time_msec()

        members = set(self.wheel_timer.fetch(now))

        for member in members:
            if not self.is_typing(member):
                # Nothing to do if they're no longer typing
                continue

            until = self._member_typing_until.get(member, None)
            if not until or until <= now:
                logger.info("Timing out typing for: %s", member.user_id)
                preserve_fn(self._stopped_typing)(member)
                continue

            # Check if we need to resend a keep alive over federation for this
            # user.
            if self.hs.is_mine_id(member.user_id):
                last_fed_poke = self._member_last_federation_poke.get(member, None)
                if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now:
                    preserve_fn(self._push_remote)(
                        member=member,
                        typing=True
                    )

            # Add a paranoia timer to ensure that we always have a timer for
            # each person typing.
            self.wheel_timer.insert(
                now=now,
                obj=member,
                then=now + 60 * 1000,
            )

    def is_typing(self, member):
        return member.user_id in self._room_typing.get(member.room_id, [])

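The per-member callLater timers are replaced by one WheelTimer swept from a single looping call: insert() files an object into a coarse time bucket, and fetch(now) drains every bucket that is due. A simplified sketch of such a structure (not the real synapse.util.wheel_timer, which also deduplicates entries):

class WheelTimer(object):
    """Bucket objects by deadline; fetch() returns everything now due."""

    def __init__(self, bucket_size=5000):
        self.bucket_size = bucket_size  # bucket width, in milliseconds
        self.buckets = {}  # bucket index -> list of objects

    def insert(self, now, obj, then):
        # File obj under the bucket containing its deadline `then`.
        self.buckets.setdefault(then // self.bucket_size, []).append(obj)

    def fetch(self, now):
        # Drain every bucket whose window has closed.
        due = [k for k in self.buckets if k <= now // self.bucket_size]
        return [obj for k in sorted(due) for obj in self.buckets.pop(k)]

Expiry is only accurate to bucket granularity, which is why _handle_timeouts above re-checks _member_typing_until before actually timing anyone out.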
    @defer.inlineCallbacks
    def started_typing(self, target_user, auth_user, room_id, timeout):
@@ -82,23 +131,17 @@ class TypingHandler(object):
            "%s has started typing in %s", target_user_id, room_id
        )

        until = self.clock.time_msec() + timeout
        member = RoomMember(room_id=room_id, user_id=target_user_id)

        was_present = member in self._member_typing_until
        was_present = member.user_id in self._room_typing.get(room_id, set())

        if member in self._member_typing_timer:
            self.clock.cancel_call_later(self._member_typing_timer[member])
        now = self.clock.time_msec()
        self._member_typing_until[member] = now + timeout

        def _cb():
            logger.debug(
                "%s has timed out in %s", target_user.to_string(), room_id
            )
            self._stopped_typing(member)

        self._member_typing_until[member] = until
        self._member_typing_timer[member] = self.clock.call_later(
            timeout / 1000.0, _cb
        self.wheel_timer.insert(
            now=now,
            obj=member,
            then=now + timeout,
        )

        if was_present:
@@ -106,8 +149,7 @@ class TypingHandler(object):
            defer.returnValue(None)

        yield self._push_update(
            room_id=room_id,
            user_id=target_user_id,
            member=member,
            typing=True,
        )

@@ -130,10 +172,6 @@ class TypingHandler(object):

        member = RoomMember(room_id=room_id, user_id=target_user_id)

        if member in self._member_typing_timer:
            self.clock.cancel_call_later(self._member_typing_timer[member])
            del self._member_typing_timer[member]

        yield self._stopped_typing(member)

    @defer.inlineCallbacks
@@ -145,79 +183,101 @@ class TypingHandler(object):

    @defer.inlineCallbacks
    def _stopped_typing(self, member):
        if member not in self._member_typing_until:
        if member.user_id not in self._room_typing.get(member.room_id, set()):
            # No point
            defer.returnValue(None)

        self._member_typing_until.pop(member, None)
        self._member_last_federation_poke.pop(member, None)

        yield self._push_update(
            room_id=member.room_id,
            user_id=member.user_id,
            member=member,
            typing=False,
        )

        del self._member_typing_until[member]
    @defer.inlineCallbacks
    def _push_update(self, member, typing):
        if self.hs.is_mine_id(member.user_id):
            # Only send updates for changes to our own users.
            yield self._push_remote(member, typing)

        if member in self._member_typing_timer:
            # Don't cancel it - either it already expired, or the real
            # stopped_typing() will cancel it
            del self._member_typing_timer[member]
        self._push_update_local(
            member=member,
            typing=typing
        )

    @defer.inlineCallbacks
    def _push_update(self, room_id, user_id, typing):
        domains = yield self.store.get_joined_hosts_for_room(room_id)
    def _push_remote(self, member, typing):
        users = yield self.state.get_current_user_in_room(member.room_id)
        self._member_last_federation_poke[member] = self.clock.time_msec()

        deferreds = []
        for domain in domains:
            if domain == self.server_name:
                self._push_update_local(
                    room_id=room_id,
                    user_id=user_id,
                    typing=typing
                )
            else:
                deferreds.append(self.federation.send_edu(
        now = self.clock.time_msec()
        self.wheel_timer.insert(
            now=now,
            obj=member,
            then=now + FEDERATION_PING_INTERVAL,
        )

        for domain in set(get_domain_from_id(u) for u in users):
            if domain != self.server_name:
                self.federation.send_edu(
                    destination=domain,
                    edu_type="m.typing",
                    content={
                        "room_id": room_id,
                        "user_id": user_id,
                        "room_id": member.room_id,
                        "user_id": member.user_id,
                        "typing": typing,
                    },
                ))

        yield defer.DeferredList(deferreds, consumeErrors=True)
                    key=member,
                )

    @defer.inlineCallbacks
    def _recv_edu(self, origin, content):
        room_id = content["room_id"]
        user_id = content["user_id"]

        # Check that the string is a valid user id
        UserID.from_string(user_id)
        member = RoomMember(user_id=user_id, room_id=room_id)

        domains = yield self.store.get_joined_hosts_for_room(room_id)
        # Check that the string is a valid user id
        user = UserID.from_string(user_id)

        if user.domain != origin:
            logger.info(
                "Got typing update from %r with bad 'user_id': %r",
                origin, user_id,
            )
            return

        users = yield self.state.get_current_user_in_room(room_id)
        domains = set(get_domain_from_id(u) for u in users)

        if self.server_name in domains:
            logger.info("Got typing update from %s: %r", user_id, content)
            now = self.clock.time_msec()
            self._member_typing_until[member] = now + FEDERATION_TIMEOUT
            self.wheel_timer.insert(
                now=now,
                obj=member,
                then=now + FEDERATION_TIMEOUT,
            )
            self._push_update_local(
                room_id=room_id,
                user_id=user_id,
                member=member,
                typing=content["typing"]
            )

    def _push_update_local(self, room_id, user_id, typing):
        room_set = self._room_typing.setdefault(room_id, set())
    def _push_update_local(self, member, typing):
        room_set = self._room_typing.setdefault(member.room_id, set())
        if typing:
            room_set.add(user_id)
            room_set.add(member.user_id)
        else:
            room_set.discard(user_id)
            room_set.discard(member.user_id)

        self._latest_room_serial += 1
        self._room_serials[room_id] = self._latest_room_serial
        self._room_serials[member.room_id] = self._latest_room_serial

        with PreserveLoggingContext():
            self.notifier.on_new_event(
                "typing_key", self._latest_room_serial, rooms=[room_id]
            )
        self.notifier.on_new_event(
            "typing_key", self._latest_room_serial, rooms=[member.room_id]
        )

    def get_all_typing_updates(self, last_id, current_id):
        # TODO: Work out a way to do this without scanning the entire state.

@@ -25,7 +25,7 @@ from synapse.http.endpoint import SpiderEndpoint
from canonicaljson import encode_canonical_json

from twisted.internet import defer, reactor, ssl, protocol, task
from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web.client import (
    BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent,
    readBody, PartialDownloadError,
@@ -386,26 +386,23 @@ class SpiderEndpointFactory(object):

    def endpointForURI(self, uri):
        logger.info("Getting endpoint for %s", uri.toBytes())

        if uri.scheme == "http":
            return SpiderEndpoint(
                reactor, uri.host, uri.port, self.blacklist, self.whitelist,
                endpoint=TCP4ClientEndpoint,
                endpoint_kw_args={
                    'timeout': 15
                },
            )
            endpoint_factory = HostnameEndpoint
        elif uri.scheme == "https":
            tlsPolicy = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port)
            return SpiderEndpoint(
                reactor, uri.host, uri.port, self.blacklist, self.whitelist,
                endpoint=SSL4ClientEndpoint,
                endpoint_kw_args={
                    'sslContextFactory': tlsPolicy,
                    'timeout': 15
                },
            )
            tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port)

            def endpoint_factory(reactor, host, port, **kw):
                return wrapClientTLS(
                    tlsCreator,
                    HostnameEndpoint(reactor, host, port, **kw))
        else:
            logger.warn("Can't get endpoint for unrecognised scheme %s", uri.scheme)
            return None
        return SpiderEndpoint(
            reactor, uri.host, uri.port, self.blacklist, self.whitelist,
            endpoint=endpoint_factory, endpoint_kw_args=dict(timeout=15),
        )


class SpiderHttpClient(SimpleHttpClient):

@@ -13,8 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet import defer, reactor
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError
@@ -58,21 +58,85 @@ def matrix_federation_endpoint(reactor, destination, ssl_context_factory=None,
        endpoint_kw_args.update(timeout=timeout)

    if ssl_context_factory is None:
        transport_endpoint = TCP4ClientEndpoint
        transport_endpoint = HostnameEndpoint
        default_port = 8008
    else:
        transport_endpoint = SSL4ClientEndpoint
        endpoint_kw_args.update(sslContextFactory=ssl_context_factory)
        def transport_endpoint(reactor, host, port, timeout):
            return wrapClientTLS(
                ssl_context_factory,
                HostnameEndpoint(reactor, host, port, timeout=timeout))
        default_port = 8448

    if port is None:
        return SRVClientEndpoint(
        return _WrappingEndpointFac(SRVClientEndpoint(
            reactor, "matrix", domain, protocol="tcp",
            default_port=default_port, endpoint=transport_endpoint,
            endpoint_kw_args=endpoint_kw_args
        )
        ))
    else:
        return transport_endpoint(reactor, domain, port, **endpoint_kw_args)
        return _WrappingEndpointFac(transport_endpoint(
            reactor, domain, port, **endpoint_kw_args
        ))
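The old IPv4-only TCP4ClientEndpoint/SSL4ClientEndpoint pair is replaced by HostnameEndpoint, with TLS layered on through wrapClientTLS rather than a separate SSL endpoint class. Roughly, the composition looks like this (a sketch against Twisted's public API; the host names and ports are placeholders):

from twisted.internet import reactor
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.web.client import BrowserLikePolicyForHTTPS

# Plain TCP, with name resolution that can yield IPv6 as well as IPv4:
plain = HostnameEndpoint(reactor, b"matrix.example.org", 8008, timeout=15)

# The same endpoint wrapped in TLS; the creator supplies the client context:
creator = BrowserLikePolicyForHTTPS().creatorForNetloc(b"matrix.example.org", 8448)
tls = wrapClientTLS(creator, HostnameEndpoint(reactor, b"matrix.example.org", 8448))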


class _WrappingEndpointFac(object):
    def __init__(self, endpoint_fac):
        self.endpoint_fac = endpoint_fac

    @defer.inlineCallbacks
    def connect(self, protocolFactory):
        conn = yield self.endpoint_fac.connect(protocolFactory)
        conn = _WrappedConnection(conn)
        defer.returnValue(conn)


class _WrappedConnection(object):
    """Wraps a connection and calls abort on it if it hasn't seen any action
    for 2.5-3 minutes.
    """
    __slots__ = ["conn", "last_request"]

    def __init__(self, conn):
        object.__setattr__(self, "conn", conn)
        object.__setattr__(self, "last_request", time.time())

    def __getattr__(self, name):
        return getattr(self.conn, name)

    def __setattr__(self, name, value):
        setattr(self.conn, name, value)

    def _time_things_out_maybe(self):
        # We use a slightly shorter timeout here just in case the callLater is
        # triggered early. Paranoia ftw.
        # TODO: Cancel the previous callLater rather than comparing time.time()?
        if time.time() - self.last_request >= 2.5 * 60:
            self.abort()
            # Abort the underlying TLS connection. The abort() method calls
            # loseConnection() on the underlying TLS connection which tries to
            # shutdown the connection cleanly. We call abortConnection()
            # since that will promptly close the underlying TCP connection.
            self.transport.abortConnection()

    def request(self, request):
        self.last_request = time.time()

        # Time this connection out if we haven't sent a request in the last
        # N minutes
        # TODO: Cancel the previous callLater?
        reactor.callLater(3 * 60, self._time_things_out_maybe)

        d = self.conn.request(request)

        def update_request_time(res):
            self.last_request = time.time()
            # TODO: Cancel the previous callLater?
            reactor.callLater(3 * 60, self._time_things_out_maybe)
            return res

        d.addCallback(update_request_time)

        return d


class SpiderEndpoint(object):
@@ -80,7 +144,7 @@ class SpiderEndpoint(object):
    Implements twisted.internet.interfaces.IStreamClientEndpoint.
    """
    def __init__(self, reactor, host, port, blacklist, whitelist,
                 endpoint=TCP4ClientEndpoint, endpoint_kw_args={}):
                 endpoint=HostnameEndpoint, endpoint_kw_args={}):
        self.reactor = reactor
        self.host = host
        self.port = port
@@ -118,7 +182,7 @@ class SRVClientEndpoint(object):
    """

    def __init__(self, reactor, service, domain, protocol="tcp",
                 default_port=None, endpoint=TCP4ClientEndpoint,
                 default_port=None, endpoint=HostnameEndpoint,
                 endpoint_kw_args={}):
        self.reactor = reactor
        self.service_name = "_%s._%s.%s" % (service, protocol, domain)

@@ -33,6 +33,7 @@ from synapse.api.errors import (
)

from signedjson.sign import sign_json

import cgi
import simplejson as json
import logging
import random
@@ -87,7 +88,8 @@ class MatrixFederationHttpClient(object):
        self.signing_key = hs.config.signing_key[0]
        self.server_name = hs.hostname
        pool = HTTPConnectionPool(reactor)
        pool.maxPersistentPerHost = 10
        pool.maxPersistentPerHost = 5
        pool.cachedConnectionTimeout = 2 * 60
        self.agent = Agent.usingEndpointFactory(
            reactor, MatrixFederationEndpointFactory(hs), pool=pool
        )
@@ -155,9 +157,7 @@ class MatrixFederationHttpClient(object):
                        time_out=timeout / 1000. if timeout else 60,
                    )

                    response = yield preserve_context_over_fn(
                        send_request,
                    )
                    response = yield preserve_context_over_fn(send_request)

                    log_result = "%d %s" % (response.code, response.phrase,)
                    break
@@ -248,7 +248,7 @@ class MatrixFederationHttpClient(object):

    @defer.inlineCallbacks
    def put_json(self, destination, path, data={}, json_data_callback=None,
                 long_retries=False):
                 long_retries=False, timeout=None):
        """ Sends the specified json data using PUT

        Args:
@@ -261,6 +261,8 @@ class MatrixFederationHttpClient(object):
                use as the request body.
            long_retries (bool): A boolean that indicates whether we should
                retry for a short or long time.
            timeout(int): How long to try (in ms) the destination for before
                giving up. None indicates no timeout.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
@@ -287,22 +289,19 @@ class MatrixFederationHttpClient(object):
            body_callback=body_callback,
            headers_dict={"Content-Type": ["application/json"]},
            long_retries=long_retries,
            timeout=timeout,
        )

        if 200 <= response.code < 300:
            # We need to update the transactions table to say it was sent?
            c_type = response.headers.getRawHeaders("Content-Type")

            if "application/json" not in c_type:
                raise RuntimeError(
                    "Content-Type not application/json"
                )
            check_content_type_is_json(response.headers)

        body = yield preserve_context_over_fn(readBody, response)
        defer.returnValue(json.loads(body))

    @defer.inlineCallbacks
    def post_json(self, destination, path, data={}, long_retries=True):
    def post_json(self, destination, path, data={}, long_retries=False,
                  timeout=None):
        """ Sends the specified json data using POST

        Args:
@@ -313,6 +312,8 @@ class MatrixFederationHttpClient(object):
                the request body. This will be encoded as JSON.
            long_retries (bool): A boolean that indicates whether we should
                retry for a short or long time.
            timeout(int): How long to try (in ms) the destination for before
                giving up. None indicates no timeout.

        Returns:
            Deferred: Succeeds when we get a 2xx HTTP response. The result
@@ -332,17 +333,13 @@ class MatrixFederationHttpClient(object):
            path.encode("ascii"),
            body_callback=body_callback,
            headers_dict={"Content-Type": ["application/json"]},
            long_retries=True,
            long_retries=long_retries,
            timeout=timeout,
        )

        if 200 <= response.code < 300:
            # We need to update the transactions table to say it was sent?
            c_type = response.headers.getRawHeaders("Content-Type")

            if "application/json" not in c_type:
                raise RuntimeError(
                    "Content-Type not application/json"
                )
            check_content_type_is_json(response.headers)

        body = yield preserve_context_over_fn(readBody, response)

@@ -395,12 +392,7 @@ class MatrixFederationHttpClient(object):

        if 200 <= response.code < 300:
            # We need to update the transactions table to say it was sent?
            c_type = response.headers.getRawHeaders("Content-Type")

            if "application/json" not in c_type:
                raise RuntimeError(
                    "Content-Type not application/json"
                )
            check_content_type_is_json(response.headers)

        body = yield preserve_context_over_fn(readBody, response)

@@ -520,3 +512,29 @@ def _flatten_response_never_received(e):
        )
    else:
        return "%s: %s" % (type(e).__name__, e.message,)


def check_content_type_is_json(headers):
    """
    Check that a set of HTTP headers have a Content-Type header, and that it
    is application/json.

    Args:
        headers (twisted.web.http_headers.Headers): headers to check

    Raises:
        RuntimeError: if the Content-Type header is missing or is not
            application/json.
    """
    c_type = headers.getRawHeaders("Content-Type")
    if c_type is None:
        raise RuntimeError(
            "No Content-Type header"
        )

    c_type = c_type[0]  # only the first header
    val, options = cgi.parse_header(c_type)
    if val != "application/json":
        raise RuntimeError(
            "Content-Type not application/json: was '%s'" % c_type
        )

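check_content_type_is_json parses the header with cgi.parse_header instead of a substring test, so parameters such as a charset don't break the comparison. For example (standard-library behaviour):

import cgi

val, options = cgi.parse_header("application/json; charset=utf-8")
assert val == "application/json"
assert options == {"charset": "utf-8"}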
@@ -19,6 +19,7 @@ from synapse.api.errors import (
|
||||
)
|
||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||
from synapse.util.caches import intern_dict
|
||||
from synapse.util.metrics import Measure
|
||||
import synapse.metrics
|
||||
import synapse.events
|
||||
|
||||
@@ -74,12 +75,12 @@ response_db_txn_duration = metrics.register_distribution(
|
||||
_next_request_id = 0
|
||||
|
||||
|
||||
def request_handler(report_metrics=True):
|
||||
def request_handler(include_metrics=False):
|
||||
"""Decorator for ``wrap_request_handler``"""
|
||||
return lambda request_handler: wrap_request_handler(request_handler, report_metrics)
|
||||
return lambda request_handler: wrap_request_handler(request_handler, include_metrics)
|
||||
|
||||
|
||||
def wrap_request_handler(request_handler, report_metrics):
|
||||
def wrap_request_handler(request_handler, include_metrics=False):
|
||||
"""Wraps a method that acts as a request handler with the necessary logging
|
||||
and exception handling.
|
||||
|
||||
@@ -103,54 +104,56 @@ def wrap_request_handler(request_handler, report_metrics):
|
||||
_next_request_id += 1
|
||||
|
||||
with LoggingContext(request_id) as request_context:
|
||||
if report_metrics:
|
||||
with Measure(self.clock, "wrapped_request_handler"):
|
||||
request_metrics = RequestMetrics()
|
||||
request_metrics.start(self.clock)
|
||||
request_metrics.start(self.clock, name=self.__class__.__name__)
|
||||
|
||||
request_context.request = request_id
|
||||
with request.processing():
|
||||
try:
|
||||
with PreserveLoggingContext(request_context):
|
||||
yield request_handler(self, request)
|
||||
except CodeMessageException as e:
|
||||
code = e.code
|
||||
if isinstance(e, SynapseError):
|
||||
logger.info(
|
||||
"%s SynapseError: %s - %s", request, code, e.msg
|
||||
)
|
||||
else:
|
||||
logger.exception(e)
|
||||
outgoing_responses_counter.inc(request.method, str(code))
|
||||
respond_with_json(
|
||||
request, code, cs_exception(e), send_cors=True,
|
||||
pretty_print=_request_user_agent_is_curl(request),
|
||||
version_string=self.version_string,
|
||||
)
|
||||
except:
|
||||
logger.exception(
|
||||
"Failed handle request %s.%s on %r: %r",
|
||||
request_handler.__module__,
|
||||
request_handler.__name__,
|
||||
self,
|
||||
request
|
||||
)
|
||||
respond_with_json(
|
||||
request,
|
||||
500,
|
||||
{
|
||||
"error": "Internal server error",
|
||||
"errcode": Codes.UNKNOWN,
|
||||
},
|
||||
send_cors=True
|
||||
)
|
||||
finally:
|
||||
request_context.request = request_id
|
||||
with request.processing():
|
||||
try:
|
||||
if report_metrics:
|
||||
request_metrics.stop(
|
||||
self.clock, request, self.__class__.__name__
|
||||
with PreserveLoggingContext(request_context):
|
||||
if include_metrics:
|
||||
yield request_handler(self, request, request_metrics)
|
||||
else:
|
||||
yield request_handler(self, request)
|
||||
except CodeMessageException as e:
|
||||
code = e.code
|
||||
if isinstance(e, SynapseError):
|
||||
logger.info(
|
||||
"%s SynapseError: %s - %s", request, code, e.msg
|
||||
)
|
||||
else:
|
||||
logger.exception(e)
|
||||
outgoing_responses_counter.inc(request.method, str(code))
|
||||
respond_with_json(
|
||||
request, code, cs_exception(e), send_cors=True,
|
||||
pretty_print=_request_user_agent_is_curl(request),
|
||||
version_string=self.version_string,
|
||||
)
|
||||
except:
|
||||
pass
|
||||
logger.exception(
|
||||
"Failed handle request %s.%s on %r: %r",
|
||||
request_handler.__module__,
|
||||
request_handler.__name__,
|
||||
self,
|
||||
request
|
||||
)
|
||||
respond_with_json(
|
||||
request,
|
||||
500,
|
||||
{
|
||||
"error": "Internal server error",
|
||||
"errcode": Codes.UNKNOWN,
|
||||
},
|
||||
send_cors=True
|
||||
)
|
||||
finally:
|
||||
try:
|
||||
request_metrics.stop(
|
||||
self.clock, request
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warn("Failed to stop metrics: %r", e)
|
||||
return wrapped_request_handler
|
||||
|
||||
|
||||
@@ -205,6 +208,7 @@ class JsonResource(HttpServer, resource.Resource):
|
||||
|
||||
def register_paths(self, method, path_patterns, callback):
|
||||
for path_pattern in path_patterns:
|
||||
logger.debug("Registering for %s %s", method, path_pattern.pattern)
|
||||
self.path_regexs.setdefault(method, []).append(
|
||||
self._PathEntry(path_pattern, callback)
|
||||
)
|
||||
@@ -219,9 +223,9 @@ class JsonResource(HttpServer, resource.Resource):
|
||||
# It does its own metric reporting because _async_render dispatches to
|
||||
# a callback and it's the class name of that callback we want to report
|
||||
# against rather than the JsonResource itself.
|
||||
@request_handler(report_metrics=False)
|
||||
@request_handler(include_metrics=True)
|
||||
@defer.inlineCallbacks
|
||||
def _async_render(self, request):
|
||||
def _async_render(self, request, request_metrics):
|
||||
""" This gets called from render() every time someone sends us a request.
|
||||
This checks if anyone has registered a callback for that method and
|
||||
path.
|
||||
@@ -230,9 +234,6 @@ class JsonResource(HttpServer, resource.Resource):
|
||||
self._send_response(request, 200, {})
|
||||
return
|
||||
|
||||
request_metrics = RequestMetrics()
|
||||
request_metrics.start(self.clock)
|
||||
|
||||
# Loop through all the registered callbacks to check if the method
|
||||
# and path regex match
|
||||
for path_entry in self.path_regexs.get(request.method, []):
|
||||
@@ -246,12 +247,6 @@ class JsonResource(HttpServer, resource.Resource):
|
||||
|
||||
callback = path_entry.callback
|
||||
|
||||
servlet_instance = getattr(callback, "__self__", None)
|
||||
if servlet_instance is not None:
|
||||
servlet_classname = servlet_instance.__class__.__name__
|
||||
else:
|
||||
servlet_classname = "%r" % callback
|
||||
|
||||
kwargs = intern_dict({
|
||||
name: urllib.unquote(value).decode("UTF-8") if value else value
|
||||
for name, value in m.groupdict().items()
|
||||
@@ -262,10 +257,13 @@ class JsonResource(HttpServer, resource.Resource):
|
||||
code, response = callback_return
|
||||
self._send_response(request, code, response)
|
||||
|
||||
try:
|
||||
request_metrics.stop(self.clock, request, servlet_classname)
|
||||
except:
|
||||
pass
|
||||
servlet_instance = getattr(callback, "__self__", None)
|
||||
if servlet_instance is not None:
|
||||
servlet_classname = servlet_instance.__class__.__name__
|
||||
else:
|
||||
servlet_classname = "%r" % callback
|
||||
|
||||
request_metrics.name = servlet_classname
|
||||
|
||||
return
|
||||
|
||||
@@ -297,11 +295,12 @@ class JsonResource(HttpServer, resource.Resource):


 class RequestMetrics(object):
-    def start(self, clock):
+    def start(self, clock, name):
         self.start = clock.time_msec()
         self.start_context = LoggingContext.current_context()
+        self.name = name

-    def stop(self, clock, request, servlet_classname):
+    def stop(self, clock, request):
         context = LoggingContext.current_context()

         tag = ""
@@ -315,26 +314,26 @@ class RequestMetrics(object):
             )
             return

-        incoming_requests_counter.inc(request.method, servlet_classname, tag)
+        incoming_requests_counter.inc(request.method, self.name, tag)

         response_timer.inc_by(
             clock.time_msec() - self.start, request.method,
-            servlet_classname, tag
+            self.name, tag
         )

         ru_utime, ru_stime = context.get_resource_usage()

         response_ru_utime.inc_by(
-            ru_utime, request.method, servlet_classname, tag
+            ru_utime, request.method, self.name, tag
         )
         response_ru_stime.inc_by(
-            ru_stime, request.method, servlet_classname, tag
+            ru_stime, request.method, self.name, tag
         )
         response_db_txn_count.inc_by(
-            context.db_txn_count, request.method, servlet_classname, tag
+            context.db_txn_count, request.method, self.name, tag
         )
         response_db_txn_duration.inc_by(
-            context.db_txn_duration, request.method, servlet_classname, tag
+            context.db_txn_duration, request.method, self.name, tag
         )

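
All of these counters are keyed by the same (method, servlet, tag) label vector; the refactor only changes where the servlet name comes from. A rough, self-contained sketch of the labelled-counter idea (a hypothetical simplified class, not synapse's CounterMetric):

    class LabelledCounter(object):
        # Simplified stand-in for synapse's CounterMetric: counts vectors
        # of label values and renders Prometheus-style text.
        def __init__(self, name, labels):
            self.name = name
            self.labels = labels
            self.counts = {}

        def inc(self, *values):
            self.counts[values] = self.counts.get(values, 0) + 1

        def render(self):
            return [
                "%s{%s} %d" % (
                    self.name,
                    ",".join('%s="%s"' % kv for kv in zip(self.labels, values)),
                    count,
                )
                for values, count in sorted(self.counts.items())
            ]

    incoming = LabelledCounter("http_server_requests", ["method", "servlet", "tag"])
    incoming.inc("GET", "RoomStateRestServlet", "")
    print("\n".join(incoming.render()))
    # http_server_requests{method="GET",servlet="RoomStateRestServlet",tag=""} 1
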
@@ -393,17 +392,30 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
     request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))

     if send_cors:
-        request.setHeader("Access-Control-Allow-Origin", "*")
-        request.setHeader("Access-Control-Allow-Methods",
-                          "GET, POST, PUT, DELETE, OPTIONS")
-        request.setHeader("Access-Control-Allow-Headers",
-                          "Origin, X-Requested-With, Content-Type, Accept")
+        set_cors_headers(request)

     request.write(json_bytes)
     finish_request(request)
     return NOT_DONE_YET


+def set_cors_headers(request):
+    """Set the CORS headers so that javascript running in a web browser can
+    use this API
+
+    Args:
+        request (twisted.web.http.Request): The http request to add CORS to.
+    """
+    request.setHeader("Access-Control-Allow-Origin", "*")
+    request.setHeader(
+        "Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS"
+    )
+    request.setHeader(
+        "Access-Control-Allow-Headers",
+        "Origin, X-Requested-With, Content-Type, Accept"
+    )
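
The extracted helper only relies on Twisted's setHeader interface, so it can be sanity-checked without spinning up a server. A small sketch, assuming the set_cors_headers shown above is in scope (the StubRequest class is hypothetical, for illustration only):

    class StubRequest(object):
        # Minimal stand-in exposing only the setHeader interface the
        # helper touches; real callers pass a twisted.web.http.Request.
        def __init__(self):
            self.headers = {}

        def setHeader(self, name, value):
            self.headers[name] = value

    req = StubRequest()
    set_cors_headers(req)  # the helper added in the hunk above
    assert req.headers["Access-Control-Allow-Origin"] == "*"
    assert "OPTIONS" in req.headers["Access-Control-Allow-Methods"]
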


 def finish_request(request):
     """ Finish writing the response to the request.

synapse/http/servlet.py

@@ -41,9 +41,13 @@ def parse_integer(request, name, default=None, required=False):
         SynapseError: if the parameter is absent and required, or if the
             parameter is present and not an integer.
     """
-    if name in request.args:
+    return parse_integer_from_args(request.args, name, default, required)
+
+
+def parse_integer_from_args(args, name, default=None, required=False):
+    if name in args:
         try:
-            return int(request.args[name][0])
+            return int(args[name][0])
         except:
             message = "Query parameter %r must be an integer" % (name,)
             raise SynapseError(400, message)
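
The new *_from_args variant takes a plain mapping of name to list-of-values, so code that is not holding a twisted Request (or that has already copied request.args) can reuse the same validation; parse_integer itself now just forwards request.args. A usage sketch against the signatures above (the argument values are hypothetical):

    args = {"limit": ["10"], "dir": ["b"]}

    parse_integer_from_args(args, "limit", default=20)   # -> 10
    parse_integer_from_args(args, "offset", default=20)  # -> 20 (absent, default used)
    parse_integer_from_args(args, "dir")                 # raises SynapseError(400, ...)
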
@@ -74,12 +78,16 @@ def parse_boolean(request, name, default=None, required=False):
             parameter is present and not one of "true" or "false".
     """

-    if name in request.args:
+    return parse_boolean_from_args(request.args, name, default, required)
+
+
+def parse_boolean_from_args(args, name, default=None, required=False):
+    if name in args:
         try:
             return {
                 "true": True,
                 "false": False,
-            }[request.args[name][0]]
+            }[args[name][0]]
         except:
             message = (
                 "Boolean query parameter %r must be one of"
@@ -116,9 +124,15 @@ def parse_string(request, name, default=None, required=False,
         parameter is present, must be one of a list of allowed values and
             is not one of those allowed values.
     """
-    if name in request.args:
-        value = request.args[name][0]
+    return parse_string_from_args(
+        request.args, name, default, required, allowed_values, param_type,
+    )
+
+
+def parse_string_from_args(args, name, default=None, required=False,
+                           allowed_values=None, param_type="string"):
+    if name in args:
+        value = args[name][0]
         if allowed_values is not None and value not in allowed_values:
             message = "Query parameter %r must be one of [%s]" % (
                 name, ", ".join(repr(v) for v in allowed_values)

synapse/metrics/__init__.py

@@ -13,28 +13,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 # Because otherwise 'resource' collides with synapse.metrics.resource
 from __future__ import absolute_import

 import logging
-from resource import getrusage, RUSAGE_SELF
 import functools
-import os
-import stat
 import time
 import gc

 from twisted.internet import reactor

 from .metric import (
-    CounterMetric, CallbackMetric, DistributionMetric, CacheMetric
+    CounterMetric, CallbackMetric, DistributionMetric, CacheMetric,
+    MemoryUsageMetric,
 )
+from .process_collector import register_process_collector


 logger = logging.getLogger(__name__)


 all_metrics = []
+all_collectors = []


 class Metrics(object):
@@ -45,6 +42,12 @@ class Metrics(object):
     def __init__(self, name):
         self.name_prefix = name

+    def make_subspace(self, name):
+        return Metrics("%s_%s" % (self.name_prefix, name))
+
+    def register_collector(self, func):
+        all_collectors.append(func)
+
     def _register(self, metric_class, name, *args, **kwargs):
         full_name = "%s_%s" % (self.name_prefix, name)

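
A collector, in this scheme, is a function run before each render to refresh whatever the callback metrics report. A usage sketch, assuming the get_metrics_for() and render_all() defined in this module (the example metric names are hypothetical):

    stats = {"connections": 0}

    def poll_connections():
        # A real collector would poll the reactor or the OS; here we
        # just bump a number so there is something to render.
        stats["connections"] += 1

    example_metrics = get_metrics_for("example")
    example_metrics.register_collector(poll_connections)
    example_metrics.register_callback("connections", lambda: stats["connections"])

    print(render_all())  # runs every registered collector, then renders metrics
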
@@ -66,6 +69,21 @@ class Metrics(object):
         return self._register(CacheMetric, *args, **kwargs)


+def register_memory_metrics(hs):
+    try:
+        import psutil
+        process = psutil.Process()
+        process.memory_info().rss
+    except (ImportError, AttributeError):
+        logger.warn(
+            "psutil is not installed or incorrect version."
+            " Disabling memory metrics."
+        )
+        return
+    metric = MemoryUsageMetric(hs, psutil)
+    all_metrics.append(metric)
+
+
 def get_metrics_for(pkg_name):
     """ Returns a Metrics instance for conveniently creating metrics
     namespaced with the given name prefix. """
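
Note the guard probes both the import and the attribute it needs: older psutil spells the call get_memory_info(), so memory_info() raises AttributeError there, and the metric is skipped rather than crashing at render time. The same optional-dependency pattern in isolation:

    # Treat psutil as optional: probe the import and the exact API the
    # metric relies on, and degrade gracefully if either is missing.
    try:
        import psutil
        psutil.Process().memory_info().rss
    except (ImportError, AttributeError):
        psutil = None

    if psutil is None:
        print("memory metrics disabled")
    else:
        print("rss: %d bytes" % psutil.Process().memory_info().rss)
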
@@ -78,8 +96,8 @@ def get_metrics_for(pkg_name):
 def render_all():
     strs = []

-    # TODO(paul): Internal hack
-    update_resource_metrics()
+    for collector in all_collectors:
+        collector()

     for metric in all_metrics:
         try:
@@ -93,73 +111,21 @@
     return "\n".join(strs)


-# Now register some standard process-wide state metrics, to give indications of
-# process resource usage
-
-rusage = None
-
-
-def update_resource_metrics():
-    global rusage
-    rusage = getrusage(RUSAGE_SELF)
-
-resource_metrics = get_metrics_for("process.resource")
-
-# msecs
-resource_metrics.register_callback("utime", lambda: rusage.ru_utime * 1000)
-resource_metrics.register_callback("stime", lambda: rusage.ru_stime * 1000)
-
-# kilobytes
-resource_metrics.register_callback("maxrss", lambda: rusage.ru_maxrss * 1024)
-
-TYPES = {
-    stat.S_IFSOCK: "SOCK",
-    stat.S_IFLNK: "LNK",
-    stat.S_IFREG: "REG",
-    stat.S_IFBLK: "BLK",
-    stat.S_IFDIR: "DIR",
-    stat.S_IFCHR: "CHR",
-    stat.S_IFIFO: "FIFO",
-}
-
-
-def _process_fds():
-    counts = {(k,): 0 for k in TYPES.values()}
-    counts[("other",)] = 0
-
-    # Not every OS will have a /proc/self/fd directory
-    if not os.path.exists("/proc/self/fd"):
-        return counts
-
-    for fd in os.listdir("/proc/self/fd"):
-        try:
-            s = os.stat("/proc/self/fd/%s" % (fd))
-            fmt = stat.S_IFMT(s.st_mode)
-            if fmt in TYPES:
-                t = TYPES[fmt]
-            else:
-                t = "other"
-
-            counts[(t,)] += 1
-        except OSError:
-            # the dirh itself used by listdir() is usually missing by now
-            pass
-
-    return counts
-
-get_metrics_for("process").register_callback("fds", _process_fds, labels=["type"])
-
-reactor_metrics = get_metrics_for("reactor")
-tick_time = reactor_metrics.register_distribution("tick_time")
-pending_calls_metric = reactor_metrics.register_distribution("pending_calls")
-
-gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"])
-gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"])
-
-reactor_metrics.register_callback(
+register_process_collector(get_metrics_for("process"))
+
+python_metrics = get_metrics_for("python")
+
+gc_time = python_metrics.register_distribution("gc_time", labels=["gen"])
+gc_unreachable = python_metrics.register_counter("gc_unreachable_total", labels=["gen"])
+python_metrics.register_callback(
     "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"]
 )
+
+reactor_metrics = get_metrics_for("python.twisted.reactor")
+tick_time = reactor_metrics.register_distribution("tick_time")
+pending_calls_metric = reactor_metrics.register_distribution("pending_calls")


 def runUntilCurrentTimer(func):
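
For readers following the labelled-callback convention used by gc_counts above: gc.get_count() returns the current collection counts for the three generations, and the dict comprehension keys each value by a 1-tuple so it renders under the "gen" label. A quick stdlib-only check:

    import gc

    # e.g. {(0,): 472, (1,): 11, (2,): 3} - one entry per generation
    print({(i,): v for i, v in enumerate(gc.get_count())})
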
synapse/metrics/metric.py

@@ -98,9 +98,9 @@ class CallbackMetric(BaseMetric):
         value = self.callback()

         if self.is_scalar():
-            return ["%s %d" % (self.name, value)]
+            return ["%s %.12g" % (self.name, value)]

-        return ["%s%s %d" % (self.name, self._render_key(k), value[k])
+        return ["%s%s %.12g" % (self.name, self._render_key(k), value[k])
                 for k in sorted(value.keys())]

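
The format change matters because callbacks may legitimately return floats: "%d" silently truncates them, while "%.12g" renders up to 12 significant digits and still prints integers without a decimal point.

    "%d" % 1.75     # '1'    - silently truncates a float callback value
    "%.12g" % 1.75  # '1.75'
    "%.12g" % 3     # '3'    - integers render unchanged
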
@@ -153,3 +153,43 @@ class CacheMetric(object):
             """%s:total{name="%s"} %d""" % (self.name, self.cache_name, total),
             """%s:size{name="%s"} %d""" % (self.name, self.cache_name, size),
         ]
+
+
+class MemoryUsageMetric(object):
+    """Keeps track of the current memory usage, using psutil.
+
+    The class will keep the current min/max/sum/counts of rss over the last
+    WINDOW_SIZE_SEC, by polling UPDATE_HZ times per second
+    """
+
+    UPDATE_HZ = 2  # number of times to get memory per second
+    WINDOW_SIZE_SEC = 30  # the size of the window in seconds
+
+    def __init__(self, hs, psutil):
+        clock = hs.get_clock()
+        self.memory_snapshots = []
+
+        self.process = psutil.Process()
+
+        clock.looping_call(self._update_curr_values, 1000 / self.UPDATE_HZ)
+
+    def _update_curr_values(self):
+        max_size = self.UPDATE_HZ * self.WINDOW_SIZE_SEC
+        self.memory_snapshots.append(self.process.memory_info().rss)
+        self.memory_snapshots[:] = self.memory_snapshots[-max_size:]
+
+    def render(self):
+        if not self.memory_snapshots:
+            return []
+
+        max_rss = max(self.memory_snapshots)
+        min_rss = min(self.memory_snapshots)
+        sum_rss = sum(self.memory_snapshots)
+        len_rss = len(self.memory_snapshots)
+
+        return [
+            "process_psutil_rss:max %d" % max_rss,
+            "process_psutil_rss:min %d" % min_rss,
+            "process_psutil_rss:total %d" % sum_rss,
+            "process_psutil_rss:count %d" % len_rss,
+        ]
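
Back-of-envelope on the window: UPDATE_HZ * WINDOW_SIZE_SEC = 2 * 30 = 60 snapshots, sampled every 1000 / 2 = 500 ms, so the stats cover roughly the last 30 seconds. The render path can be exercised without a homeserver by filling memory_snapshots by hand; a small sketch, assuming the MemoryUsageMetric class above is importable (object.__new__ skips __init__, which would otherwise need an hs with a clock):

    metric = object.__new__(MemoryUsageMetric)
    metric.memory_snapshots = [100, 150, 125]  # fake rss samples, in bytes
    print("\n".join(metric.render()))
    # process_psutil_rss:max 150
    # process_psutil_rss:min 100
    # process_psutil_rss:total 375
    # process_psutil_rss:count 3
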
Some files were not shown because too many files have changed in this diff.