Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-05 01:10:13 +00:00)

Compare commits: erikj/pypy ... matrix-org (2493 commits)
(Commit listing omitted: only abbreviated SHA1 hashes survived from the Author / SHA1 / Date table; authors, dates, and commit messages were not preserved.)
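The compared range itself can still be listed locally with plain git once the mirror is cloned. A minimal sketch follows; the compare is between the erikj/pypy branch and a matrix-org branch whose name is cut off in the header above, so origin/develop below is only a hypothetical stand-in for that second branch, and the sketch assumes both branches still exist on the mirror.

git clone https://github.com/element-hq/synapse.git
cd synapse
# three-dot range: commits reachable from either branch but not from both
# (origin/develop is an assumed placeholder for the second branch in the compare)
git log --oneline origin/erikj/pypy...origin/develop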
.gitignore (vendored): 8 changed lines
@@ -24,10 +24,10 @@ homeserver*.yaml
.coverage
htmlcov
demo/*.db
demo/*.log
demo/*.log.*
demo/*.pid
demo/*/*.db
demo/*/*.log
demo/*/*.log.*
demo/*/*.pid
demo/media_store.*
demo/etc
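The ignore patterns above can be sanity-checked with git itself. A minimal sketch, run from the repository root; the demo/8080/homeserver.db path is a made-up example chosen to exercise the demo/*/*.db pattern, and it assumes that pattern is present in the resulting .gitignore.

# -v prints the .gitignore source line and pattern that matched, or nothing if the path is not ignored
git check-ignore -v demo/8080/homeserver.db
git check-ignore -v demo/media_store.test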
.travis.yml (new file): 17 added lines
@@ -0,0 +1,17 @@
sudo: false
language: python
python: 2.7

# tell travis to cache ~/.cache/pip
cache: pip

env:
- TOX_ENV=packaging
- TOX_ENV=pep8
- TOX_ENV=py27

install:
- pip install tox

script:
- tox -e $TOX_ENV
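The three TOX_ENV values in the matrix above can be exercised locally with the same commands Travis runs in its install and script phases; this assumes a Python 2.7 interpreter and pip are available on the machine.

pip install tox
# run each environment from the env matrix above
tox -e packaging
tox -e pep8
tox -e py27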
CHANGES.rst: 863 changed lines
@@ -1,3 +1,858 @@
|
||||
Changes in synapse v0.19.3 (2017-03-20)
|
||||
=======================================
|
||||
|
||||
No changes since v0.19.3-rc2
|
||||
|
||||
|
||||
Changes in synapse v0.19.3-rc2 (2017-03-13)
|
||||
===========================================
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix bug in handling of incoming device list updates over federation.
|
||||
|
||||
|
||||
|
||||
Changes in synapse v0.19.3-rc1 (2017-03-08)
|
||||
===========================================
|
||||
|
||||
Features:
|
||||
|
||||
* Add some administration functionalities. Thanks to morteza-araby! (PR #1784)
|
||||
|
||||
|
||||
Changes:
|
||||
|
||||
* Reduce database table sizes (PR #1873, #1916, #1923, #1963)
|
||||
* Update contrib/ to not use syutil. Thanks to andrewshadura! (PR #1907)
|
||||
* Don't fetch current state when sending an event in common case (PR #1955)
|
||||
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904)
|
||||
* Fix caching to not cache error responses (PR #1913)
|
||||
* Fix APIs to make kick & ban reasons work (PR #1917)
|
||||
* Fix bugs in the /keys/changes api (PR #1921)
|
||||
* Fix bug where users couldn't forget rooms they were banned from (PR #1922)
|
||||
* Fix issue with long language values in pushers API (PR #1925)
|
||||
* Fix a race in transaction queue (PR #1930)
|
||||
* Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR
|
||||
#1945)
|
||||
* Fix device list update to not constantly resync (PR #1964)
|
||||
* Fix potential for huge memory usage when getting device that have
|
||||
changed (PR #1969)
|
||||
|
||||
|
||||
|
||||
Changes in synapse v0.19.2 (2017-02-20)
|
||||
=======================================
|
||||
|
||||
* Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for
|
||||
pointing it out! (PR #1929)
|
||||
|
||||
|
||||
Changes in synapse v0.19.1 (2017-02-09)
|
||||
=======================================
|
||||
|
||||
* Fix bug where state was incorrectly reset in a room when synapse received an
|
||||
event over federation that did not pass auth checks (PR #1892)
|
||||
|
||||
|
||||
Changes in synapse v0.19.0 (2017-02-04)
|
||||
=======================================
|
||||
|
||||
No changes since RC 4.
|
||||
|
||||
|
||||
Changes in synapse v0.19.0-rc4 (2017-02-02)
|
||||
===========================================
|
||||
|
||||
* Bump cache sizes for common membership queries (PR #1879)
|
||||
|
||||
|
||||
Changes in synapse v0.19.0-rc3 (2017-02-02)
|
||||
===========================================
|
||||
|
||||
* Fix email push in pusher worker (PR #1875)
|
||||
* Make presence.get_new_events a bit faster (PR #1876)
|
||||
* Make /keys/changes a bit more performant (PR #1877)
|
||||
|
||||
|
||||
Changes in synapse v0.19.0-rc2 (2017-02-02)
|
||||
===========================================
|
||||
|
||||
* Include newly joined users in /keys/changes API (PR #1872)
|
||||
|
||||
|
||||
Changes in synapse v0.19.0-rc1 (2017-02-02)
|
||||
===========================================
|
||||
|
||||
Features:
|
||||
|
||||
* Add support for specifying multiple bind addresses (PR #1709, #1712, #1795,
|
||||
#1835). Thanks to @kyrias!
|
||||
* Add /account/3pid/delete endpoint (PR #1714)
|
||||
* Add config option to configure the Riot URL used in notification emails (PR
|
||||
#1811). Thanks to @aperezdc!
|
||||
* Add username and password config options for turn server (PR #1832). Thanks
|
||||
to @xsteadfastx!
|
||||
* Implement device lists updates over federation (PR #1857, #1861, #1864)
|
||||
* Implement /keys/changes (PR #1869, #1872)
|
||||
|
||||
|
||||
Changes:
|
||||
|
||||
* Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
|
||||
* Log which files we saved attachments to in the media_repository (PR #1791)
|
||||
* Linearize updates to membership via PUT /state/ to better handle multiple
|
||||
joins (PR #1787)
|
||||
* Limit number of entries to prefill from cache on startup (PR #1792)
|
||||
* Remove full_twisted_stacktraces option (PR #1802)
|
||||
* Measure size of some caches by sum of the size of cached values (PR #1815)
|
||||
* Measure metrics of string_cache (PR #1821)
|
||||
* Reduce logging verbosity (PR #1822, #1823, #1824)
|
||||
* Don't clobber a displayname or avatar_url if provided by an m.room.member
|
||||
event (PR #1852)
|
||||
* Better handle 401/404 response for federation /send/ (PR #1866, #1871)
|
||||
|
||||
|
||||
Fixes:
|
||||
|
||||
* Fix ability to change password to a non-ascii one (PR #1711)
|
||||
* Fix push getting stuck due to looking at the wrong view of state (PR #1820)
|
||||
* Fix email address comparison to be case insensitive (PR #1827)
|
||||
* Fix occasional inconsistencies of room membership (PR #1836, #1840)
|
||||
|
||||
|
||||
Performance:
|
||||
|
||||
* Don't block messages sending on bumping presence (PR #1789)
|
||||
* Change device_inbox stream index to include user (PR #1793)
|
||||
* Optimise state resolution (PR #1818)
|
||||
* Use DB cache of joined users for presence (PR #1862)
|
||||
* Add an index to make membership queries faster (PR #1867)
|
||||
|
||||
|
||||
Changes in synapse v0.18.7 (2017-01-09)
|
||||
=======================================
|
||||
|
||||
No changes from v0.18.7-rc2
|
||||
|
||||
|
||||
Changes in synapse v0.18.7-rc2 (2017-01-07)
|
||||
===========================================
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix error in rc1's discarding invalid inbound traffic logic that was
|
||||
incorrectly discarding missing events
|
||||
|
||||
|
||||
Changes in synapse v0.18.7-rc1 (2017-01-06)
|
||||
===========================================
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* Fix error in #PR 1764 to actually fix the nightmare #1753 bug.
|
||||
* Improve deadlock logging further
|
||||
* Discard inbound federation traffic from invalid domains, to immunise
|
||||
against #1753
|
||||
|
||||
|
||||
Changes in synapse v0.18.6 (2017-01-06)
=======================================

Bug fixes:

* Fix bug when checking if a guest user is allowed to join a room (PR #1772)
  Thanks to Patrik Oldsberg for diagnosing and the fix!


Changes in synapse v0.18.6-rc3 (2017-01-05)
===========================================

Bug fixes:

* Fix bug where we failed to send ban events to the banned server (PR #1758)
* Fix bug where we sent events that didn't originate on this server to
  other servers (PR #1764)
* Fix bug where processing an event from a remote server took a long time
  because we were making long HTTP requests (PR #1765, PR #1744)

Changes:

* Improve logging for debugging deadlocks (PR #1766, PR #1767)


Changes in synapse v0.18.6-rc2 (2016-12-30)
===========================================

Bug fixes:

* Fix memory leak in twisted by initialising logging correctly (PR #1731)
* Fix bug where fetching missing events took an unacceptable amount of time in
  large rooms (PR #1734)


Changes in synapse v0.18.6-rc1 (2016-12-29)
===========================================

Bug fixes:

* Make sure that outbound connections are closed (PR #1725)

Changes in synapse v0.18.5 (2016-12-16)
=======================================

Bug fixes:

* Fix federation /backfill returning events it shouldn't (PR #1700)
* Fix crash in url preview (PR #1701)


Changes in synapse v0.18.5-rc3 (2016-12-13)
===========================================

Features:

* Add support for E2E for guests (PR #1653)
* Add new API appservice specific public room list (PR #1676)
* Add new room membership APIs (PR #1680)


Changes:

* Enable guest access for private rooms by default (PR #653)
* Limit the number of events that can be created on a given room concurrently
  (PR #1620)
* Log the args that we have on UI auth completion (PR #1649)
* Stop generating refresh_tokens (PR #1654)
* Stop putting a time caveat on access tokens (PR #1656)
* Remove unspecced GET endpoints for e2e keys (PR #1694)


Bug fixes:

* Fix handling of 500 and 429's over federation (PR #1650)
* Fix Content-Type header parsing (PR #1660)
* Fix error when previewing sites that include unicode, thanks to kyrias
  (PR #1664)
* Fix some cases where we drop read receipts (PR #1678)
* Fix bug where calls to ``/sync`` didn't correctly timeout (PR #1683)
* Fix bug where E2E key query would fail if a single remote host failed
  (PR #1686)

Changes in synapse v0.18.5-rc2 (2016-11-24)
===========================================

Bug fixes:

* Don't send old events over federation, fixes bug in -rc1.

Changes in synapse v0.18.5-rc1 (2016-11-24)
===========================================

Features:

* Implement "event_fields" in filters (PR #1638)

Changes:

* Use external ldap auth package (PR #1628)
* Split out federation transaction sending to a worker (PR #1635)
* Fail with a coherent error message if `/sync?filter=` is invalid (PR #1636)
* More efficient notif count queries (PR #1644)

Changes in synapse v0.18.4 (2016-11-22)
=======================================

Bug fixes:

* Add workaround for buggy clients that fail to register (PR #1632)


Changes in synapse v0.18.4-rc1 (2016-11-14)
===========================================

Changes:

* Various database efficiency improvements (PR #1188, #1192)
* Update default config to blacklist more internal IPs, thanks to Euan Kemp
  (PR #1198)
* Allow specifying duration in minutes in config, thanks to Daniel Dent
  (PR #1625)


Bug fixes:

* Fix media repo to set CORS headers on responses (PR #1190)
* Fix registration to not error on non-ascii passwords (PR #1191)
* Fix create event code to limit the number of prev_events (PR #1615)
* Fix bug in transaction ID deduplication (PR #1624)

Changes in synapse v0.18.3 (2016-11-08)
=======================================

SECURITY UPDATE

Explicitly require authentication when using LDAP3. This is the default on
versions of ``ldap3`` above 1.0, but some distributions will package an older
version.

If you are using LDAP3 login and have a version of ``ldap3`` older than 1.0 it
is **CRITICAL to upgrade**.

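A quick way to see which ``ldap3`` version is installed in Synapse's
virtualenv, and to upgrade it if needed (an illustrative check, not an
official upgrade procedure)::

    source ~/.synapse/bin/activate
    pip show ldap3                 # check the "Version:" line; below 1.0 is vulnerable
    pip install --upgrade ldap3
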
Changes in synapse v0.18.2 (2016-11-01)
=======================================

No changes since v0.18.2-rc5


Changes in synapse v0.18.2-rc5 (2016-10-28)
===========================================

Bug fixes:

* Fix prometheus process metrics in worker processes (PR #1184)

Changes in synapse v0.18.2-rc4 (2016-10-27)
===========================================

Bug fixes:

* Fix ``user_threepids`` schema delta, which in some instances prevented
  startup after upgrade (PR #1183)


Changes in synapse v0.18.2-rc3 (2016-10-27)
===========================================

Changes:

* Allow clients to supply access tokens as headers (PR #1098); see the example
  after this list.
* Clarify error codes for GET /filter/, thanks to Alexander Maznev (PR #1164)
* Make password reset email field case insensitive (PR #1170)
* Reduce redundant database work in email pusher (PR #1174)
* Allow configurable rate limiting per AS (PR #1175)
* Check whether to ratelimit sooner to avoid work (PR #1176)
* Standardise prometheus metrics (PR #1177)

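For illustration, the same request authenticated via the older query
parameter and via the header form (the endpoint and token here are
placeholders)::

    curl "https://localhost:8448/_matrix/client/r0/sync?access_token=<token>"
    curl -H "Authorization: Bearer <token>" "https://localhost:8448/_matrix/client/r0/sync"
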
Bug fixes:

* Fix incredibly slow back pagination query (PR #1178)
* Fix infinite typing bug (PR #1179)

Changes in synapse v0.18.2-rc2 (2016-10-25)
===========================================

(This release did not include the changes advertised and was identical to RC1)


Changes in synapse v0.18.2-rc1 (2016-10-17)
===========================================

Changes:

* Remove redundant event_auth index (PR #1113)
* Reduce DB hits for replication (PR #1141)
* Implement pluggable password auth (PR #1155)
* Remove rate limiting from app service senders and fix get_or_create_user
  requester, thanks to Patrik Oldsberg (PR #1157)
* window.postmessage for Interactive Auth fallback (PR #1159)
* Use sys.executable instead of hardcoded python, thanks to Pedro Larroy
  (PR #1162)
* Add config option for adding additional TLS fingerprints (PR #1167)
* User-interactive auth on delete device (PR #1168)


Bug fixes:

* Fix not being allowed to set your own state_key, thanks to Patrik Oldsberg
  (PR #1150)
* Fix interactive auth to return 401 for an incorrect password (PR #1160,
  #1166)
* Fix email push notifs being dropped (PR #1169)

Changes in synapse v0.18.1 (2016-10-05)
=======================================

No changes since v0.18.1-rc1


Changes in synapse v0.18.1-rc1 (2016-09-30)
===========================================

Features:

* Add total_room_count_estimate to ``/publicRooms`` (PR #1133)


Changes:

* Time out typing over federation (PR #1140)
* Restructure LDAP authentication (PR #1153)


Bug fixes:

* Fix 3pid invites when server is already in the room (PR #1136)
* Fix upgrading with SQLite taking lots of CPU for a few days
  after upgrade (PR #1144)
* Fix upgrading from very old database versions (PR #1145)
* Fix port script to work with recently added tables (PR #1146)

Changes in synapse v0.18.0 (2016-09-19)
=======================================

The release includes major changes to the state storage database schemas, which
significantly reduce database size. Synapse will attempt to upgrade the current
data in the background. Servers with large SQLite databases may experience
degradation of performance while this upgrade is in progress, so you may
want to consider migrating to Postgres before upgrading very large SQLite
databases.


Changes:

* Make public room search case insensitive (PR #1127)


Bug fixes:

* Fix and clean up publicRooms pagination (PR #1129)

Changes in synapse v0.18.0-rc1 (2016-09-16)
===========================================

Features:

* Add ``only=highlight`` on ``/notifications`` (PR #1081)
* Add server param to /publicRooms (PR #1082)
* Allow clients to ask for the whole of a single state event (PR #1094)
* Add is_direct param to /createRoom (PR #1108)
* Add pagination support to publicRooms (PR #1121)
* Add very basic filter API to /publicRooms (PR #1126)
* Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104,
  #1111)


Changes:

* Move to storing state_groups_state as deltas, greatly reducing DB size
  (PR #1065)
* Reduce amount of state pulled out of the DB during common requests (PR #1069)
* Allow PDF to be rendered from media repo (PR #1071)
* Reindex state_groups_state after pruning (PR #1085)
* Clobber EDUs in send queue (PR #1095)
* Conform better to the CAS protocol specification (PR #1100)
* Limit how often we ask for keys from dead servers (PR #1114)


Bug fixes:

* Fix /notifications API when used with ``from`` param (PR #1080)
* Fix backfill when cannot find an event. (PR #1107)

Changes in synapse v0.17.3 (2016-09-09)
=======================================

This release fixes a major bug that stopped servers from handling rooms with
over 1000 members.

Changes in synapse v0.17.2 (2016-09-08)
=======================================

This release contains security bug fixes. Please upgrade.

No changes since v0.17.2-rc1


Changes in synapse v0.17.2-rc1 (2016-09-05)
===========================================

Features:

* Start adding store-and-forward direct-to-device messaging (PR #1046, #1050,
  #1062, #1066)


Changes:

* Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063,
  #1068)
* Don't notify for online to online presence transitions. (PR #1054)
* Occasionally persist unpersisted presence updates (PR #1055)
* Allow application services to have an optional 'url' (PR #1056)
* Clean up old sent transactions from DB (PR #1059)


Bug fixes:

* Fix None check in backfill (PR #1043)
* Fix membership changes to be idempotent (PR #1067)
* Fix bug in get_pdu where it would sometimes return events with incorrect
  signature

Changes in synapse v0.17.1 (2016-08-24)
=======================================

Changes:

* Delete old received_transactions rows (PR #1038)
* Pass through user-supplied content in /join/$room_id (PR #1039)


Bug fixes:

* Fix bug with backfill (PR #1040)


Changes in synapse v0.17.1-rc1 (2016-08-22)
===========================================

Features:

* Add notification API (PR #1028)


Changes:

* Don't print stack traces when failing to get remote keys (PR #996)
* Various federation /event/ perf improvements (PR #998)
* Only process one local membership event per room at a time (PR #1005)
* Move default display name push rule (PR #1011, #1023)
* Fix up preview URL API. Add tests. (PR #1015)
* Set ``Content-Security-Policy`` on media repo (PR #1021)
* Make notify_interested_services faster (PR #1022)
* Add usage stats to prometheus monitoring (PR #1037)


Bug fixes:

* Fix token login (PR #993)
* Fix CAS login (PR #994, #995)
* Fix /sync to not clobber status_msg (PR #997)
* Fix redacted state events to include prev_content (PR #1003)
* Fix some bugs in the auth/ldap handler (PR #1007)
* Fix backfill request to limit URI length, so that remotes don't reject the
  requests due to path length limits (PR #1012)
* Fix AS push code to not send duplicate events (PR #1025)

Changes in synapse v0.17.0 (2016-08-08)
=======================================

This release contains significant security bug fixes regarding authenticating
events received over federation. PLEASE UPGRADE.

This release changes the LDAP configuration format in a backwards incompatible
way, see PR #843 for details.


Changes:

* Add federation /version API (PR #990)
* Make psutil dependency optional (PR #992)


Bug fixes:

* Fix URL preview API to exclude HTML comments in description (PR #988)
* Fix error handling of remote joins (PR #991)

Changes in synapse v0.17.0-rc4 (2016-08-05)
===========================================

Changes:

* Change the way we summarize URLs when previewing (PR #973)
* Add new ``/state_ids/`` federation API (PR #979)
* Speed up processing of ``/state/`` response (PR #986)

Bug fixes:

* Fix event persistence when event has already been partially persisted
  (PR #975, #983, #985)
* Fix port script to also copy across backfilled events (PR #982)

Changes in synapse v0.17.0-rc3 (2016-08-02)
===========================================

Changes:

* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
* Add some basic admin API docs (PR #963)


Bug fixes:

* Send the correct host header when fetching keys (PR #941)
* Fix joining a room that has missing auth events (PR #964)
* Fix various push bugs (PR #966, #970)
* Fix adding emails on registration (PR #968)


Changes in synapse v0.17.0-rc2 (2016-08-02)
===========================================

(This release did not include the changes advertised and was identical to RC1)

Changes in synapse v0.17.0-rc1 (2016-07-28)
===========================================

This release changes the LDAP configuration format in a backwards incompatible
way, see PR #843 for details.


Features:

* Add purge_media_cache admin API (PR #902)
* Add deactivate account admin API (PR #903)
* Add optional pepper to password hashing (PR #907, #910 by KentShikama); see
  the sketch after this list.
* Add an admin option to shared secret registration (breaks backwards compat)
  (PR #909)
* Add purge local room history API (PR #911, #923, #924)
* Add requestToken endpoints (PR #915)
* Add an /account/deactivate endpoint (PR #921)
* Add filter param to /messages. Add 'contains_url' to filter. (PR #922)
* Add device_id support to /login (PR #929)
* Add device_id support to /v2/register flow. (PR #937, #942)
* Add GET /devices endpoint (PR #939, #944)
* Add GET /device/{deviceId} (PR #943)
* Add update and delete APIs for devices (PR #949)

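The pepper is a server-wide secret mixed into password hashing. A minimal
sketch of the idea (an illustration of the concept, not Synapse's exact
implementation; ``pepper`` here stands in for a hypothetical config value)::

    import bcrypt

    def hash_password(password, pepper):
        # The pepper is appended before bcrypt hashing, so stolen hashes
        # cannot be brute-forced without also knowing the server-side pepper.
        return bcrypt.hashpw((password + pepper).encode("utf-8"), bcrypt.gensalt())
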
Changes:

* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
* Linearize some federation endpoints based on (origin, room_id) (PR #879)
* Remove the legacy v0 content upload API. (PR #888)
* Use similar naming we use in email notifs for push (PR #894)
* Optionally include password hash in createUser endpoint (PR #905 by
  KentShikama)
* Use a query that postgresql optimises better for get_events_around (PR #906)
* Fall back to 'username' if 'user' is not given for appservice registration.
  (PR #927 by Half-Shot)
* Add metrics for psutil derived memory usage (PR #936)
* Record device_id in client_ips (PR #938)
* Send the correct host header when fetching keys (PR #941)
* Log the hostname the reCAPTCHA was completed on (PR #946)
* Make the device id on e2e key upload optional (PR #956)
* Add r0.2.0 to the "supported versions" list (PR #960)
* Don't include name of room for invites in push (PR #961)


Bug fixes:

* Fix substitution failure in mail template (PR #887)
* Put most recent 20 messages in email notif (PR #892)
* Ensure that the guest user is in the database when upgrading accounts
  (PR #914)
* Fix various edge cases in auth handling (PR #919)
* Fix 500 ISE when sending alias event without a state_key (PR #925)
* Fix bug where we stored rejections in the state_group, persist all
  rejections (PR #948)
* Fix missing check for whether the user is banned when handling 3pid invites
  (PR #952)
* Fix a couple of bugs in the transaction and keyring code (PR #954, #955)

Changes in synapse v0.16.1-r1 (2016-07-08)
==========================================

THIS IS A CRITICAL SECURITY UPDATE.

This fixes a bug which allowed users' accounts to be accessed by unauthorised
users.

Changes in synapse v0.16.1 (2016-06-20)
=======================================

Bug fixes:

* Fix assorted bugs in ``/preview_url`` (PR #872)
* Fix TypeError when setting unicode passwords (PR #873)


Performance improvements:

* Turn ``use_frozen_events`` off by default (PR #877)
* Disable responding with canonical json for federation (PR #878)

Changes in synapse v0.16.1-rc1 (2016-06-15)
===========================================

Features: None

Changes:

* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
* Linearize fetching of gaps on incoming events (PR #871)


Bug fixes:

* Fix bug where rooms were marked as published by default (PR #857)
* Fix bug where joining room with an event with invalid sender (PR #868)
* Fix bug where backfilled events were sent down sync streams (PR #869)
* Fix bug where outgoing connections could wedge indefinitely, causing push
  notifications to be unreliable (PR #870)


Performance improvements:

* Improve ``/publicRooms`` performance (PR #859)

Changes in synapse v0.16.0 (2016-06-09)
=======================================

NB: As of v0.14 all AS config files must have an ID field.


Bug fixes:

* Don't make rooms published by default (PR #857)

Changes in synapse v0.16.0-rc2 (2016-06-08)
===========================================

Features:

* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849);
  see the sketch after this list.

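``gc.set_threshold`` is the standard CPython hook that this option feeds
through to; purely as an illustration of what the tuning controls::

    import gc

    # (threshold0, threshold1, threshold2): how many allocations may occur
    # before each GC generation is collected. Raising threshold0 makes the
    # collector run less often, trading memory for CPU.
    gc.set_threshold(700, 10, 10)
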
Changes:

* Record metrics about GC (PR #771, #847, #852)
* Add metric counter for number of persisted events (PR #841)

Bug fixes:

* Fix 'From' header in email notifications (PR #843)
* Fix presence where timeouts were not being fired for the first 8h after
  restarts (PR #842)
* Fix bug where synapse sent malformed transactions to AS's when retrying
  transactions (Commits 310197b, 8437906)

Performance improvements:

* Remove event fetching from DB threads (PR #835)
* Change the way we cache events (PR #836)
* Add events to cache when we persist them (PR #840)

Changes in synapse v0.16.0-rc1 (2016-06-03)
===========================================

Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.

Features:

* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
  #821)
* Add a ``url_preview_ip_range_whitelist`` config param (PR #760)
* Add /report endpoint (PR #762)
* Add basic ignore user API (PR #763)
* Add an openidish mechanism for proving that you own a given user_id (PR #765)
* Allow clients to specify a server_name to avoid 'No known servers' (PR #794)
* Add secondary_directory_servers option to fetch room list from other servers
  (PR #808, #813)

Changes:

* Report per request metrics for all of the things using request_handler
  (PR #756)
* Correctly handle ``NULL`` password hashes from the database (PR #775)
* Allow receipts for events we haven't seen in the db (PR #784)
* Make synctl read a cache factor from config file (PR #785)
* Increment badge count per missed convo, not per msg (PR #793)
* Special case m.room.third_party_invite event auth to match invites (PR #814)


Bug fixes:

* Fix typo in event_auth servlet path (PR #757)
* Fix password reset (PR #758)


Performance improvements:

* Reduce database inserts when sending transactions (PR #767)
* Queue events by room for persistence (PR #768)
* Add cache to ``get_user_by_id`` (PR #772)
* Add and use ``get_domain_from_id`` (PR #773)
* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779)
* Remove unused indices (PR #782)
* Add caches to ``bulk_get_push_rules*`` (PR #804)
* Cache ``get_event_reference_hashes`` (PR #806)
* Add ``get_users_with_read_receipts_in_room`` cache (PR #809)
* Use state to calculate ``get_users_in_room`` (PR #811)
* Load push rules in storage layer so that they get cached (PR #825)
* Make ``get_joined_hosts_for_room`` use get_users_in_room (PR #828)
* Poke notifier on next reactor tick (PR #829)
* Change CacheMetrics to be quicker (PR #830)

Changes in synapse v0.15.0-rc1 (2016-04-26)
===========================================

Features:

* Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck
  (PR #671, #687)
* Add URL previewing support (PR #688)
* Add login support for LDAP, thanks to Christoph Witzany (PR #701)
* Add GET endpoint for pushers (PR #716)

Changes:

* Never notify for member events (PR #667)
* Deduplicate identical ``/sync`` requests (PR #668)
* Require user to have left room to forget room (PR #673)
* Use DNS cache if within TTL (PR #677)
* Let users see their own leave events (PR #699)
* Deduplicate membership changes (PR #700)
* Increase performance of pusher code (PR #705)
* Respond with error status 504 if failed to talk to remote server (PR #731)
* Increase search performance on postgres (PR #745)

Bug fixes:

* Fix bug where disabling all notifications still resulted in push (PR #678)
* Fix bug where users couldn't reject remote invites if remote refused (PR #691)
* Fix bug where synapse attempted to backfill from itself (PR #693)
* Fix bug where profile information was not correctly added when joining remote
  rooms (PR #703)
* Fix bug where register API required incorrect key name for AS registration
  (PR #727)

Changes in synapse v0.14.0 (2016-03-30)
=======================================

@@ -511,7 +1366,7 @@ Configuration:

* Add support for changing the bind host of the metrics listener via the
  ``metrics_bind_host`` option.


Changes in synapse v0.9.0-r5 (2015-05-21)
=========================================

@@ -853,7 +1708,7 @@ See UPGRADE for information about changes to the client server API, including
breaking backwards compatibility with VoIP calls and registration API.

Homeserver:

* When a user changes their displayname or avatar the server will now update
  all their join states to reflect this.
* The server now adds "age" key to events to indicate how old they are. This
  is clock independent, so at no point does any server or webclient have to

@@ -911,7 +1766,7 @@ Changes in synapse 0.2.2 (2014-09-06)
=====================================

Homeserver:

* When the server returns state events it now also includes the previous
  content.
* Add support for inviting people when creating a new room.
* Make the homeserver inform the room via `m.room.aliases` when a new alias

@@ -923,7 +1778,7 @@ Webclient:

* Handle `m.room.aliases` events.
* Asynchronously send messages and show a local echo.
* Inform the UI when a message failed to send.
* Only autoscroll on receiving a new message if the user was already at the
  bottom of the screen.
* Add support for ban/kick reasons.

@@ -11,8 +11,10 @@ recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py

recursive-include docs *
recursive-include res *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py

recursive-include synapse/static *.css
@@ -22,5 +24,7 @@ recursive-include synapse/static *.js

exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
recursive-exclude jenkins *.sh

prune demo/etc


README.rst
@@ -11,8 +11,8 @@ VoIP. The basics you need to know to get up and running are:
  like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
  you will normally refer to yourself and others using a 3PID: email
  address, phone number, etc rather than manipulating Matrix user IDs)
  you will normally refer to yourself and others using a third party identifier
  (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)

The overall architecture is::

@@ -20,12 +20,13 @@ The overall architecture is::
      https://somewhere.org/_matrix      https://elsewhere.net/_matrix

``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
bridge at irc://irc.freenode.net/matrix.
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
via IRC bridge at irc://irc.freenode.net/matrix.

Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!


About Matrix
============

@@ -52,39 +53,30 @@ generation of fully open and interoperable messaging and VoIP apps for the
internet.

Synapse is a reference "homeserver" implementation of Matrix from the core
development team at matrix.org, written in Python/Twisted for clarity and
simplicity. It is intended to showcase the concept of Matrix and let folks see
the spec in the context of a codebase and let you run your own homeserver and
generally help bootstrap the ecosystem.
development team at matrix.org, written in Python/Twisted. It is intended to
showcase the concept of Matrix and let folks see the spec in the context of a
codebase and let you run your own homeserver and generally help bootstrap the
ecosystem.

In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver which stores all their personal chat history and user
account information - much as a mail client connects through to an IMAP/SMTP
server. Just like email, you can either run your own Matrix homeserver and
control and own your own communications and history or use one hosted by
someone else (e.g. matrix.org) - there is no single point of control or
mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc.

Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
web client demo implemented in AngularJS) and cmdclient (a basic Python
command line utility which lets you easily see what the JSON APIs are up to).

Meanwhile, iOS and Android SDKs and clients are available from:

- https://github.com/matrix-org/matrix-ios-sdk
- https://github.com/matrix-org/matrix-ios-kit
- https://github.com/matrix-org/matrix-ios-console
- https://github.com/matrix-org/matrix-android-sdk
a Matrix homeserver. The homeserver stores all their personal chat history and
user account information - much as a mail client connects through to an
IMAP/SMTP server. Just like email, you can either run your own Matrix
homeserver and control and own your own communications and history or use one
hosted by someone else (e.g. matrix.org) - there is no single point of control
or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
etc.

We'd like to invite you to join #matrix:matrix.org (via
https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
Matrix spec at https://matrix.org/docs/spec and API docs at
https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
report any bugs via https://matrix.org/jira.
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.

Thanks for using Matrix!

[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.


Synapse Installation
====================

@@ -94,9 +86,14 @@ Synapse is the reference python/twisted Matrix homeserver implementation.

System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 2.7
- At least 512 MB RAM.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

Synapse is written in python but some of the libraries is uses are written in
Installing from source
----------------------
(Prebuilt packages are available for some platforms - see `Platform-Specific
Instructions`_.)

Synapse is written in python but some of the libraries it uses are written in
C. So before we can install synapse itself we need a working C compiler and the
header files for python C extensions.

@@ -104,7 +101,7 @@ Installing prerequisites on Ubuntu or Debian::

    sudo apt-get install build-essential python2.7-dev libffi-dev \
                         python-pip python-setuptools sqlite3 \
                         libssl-dev python-virtualenv libjpeg-dev
                         libssl-dev python-virtualenv libjpeg-dev libxslt1-dev

Installing prerequisites on ArchLinux::

@@ -123,6 +120,7 @@ Installing prerequisites on Mac OS X::

    xcode-select --install
    sudo easy_install pip
    sudo pip install virtualenv
    brew install pkg-config libffi

Installing prerequisites on Raspbian::

@@ -133,10 +131,22 @@ Installing prerequisites on Raspbian::

    sudo pip install --upgrade ndg-httpsclient
    sudo pip install --upgrade virtualenv

Installing prerequisites on openSUSE::

    sudo zypper in -t pattern devel_basis
    sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
                   python-devel libffi-devel libopenssl-devel libjpeg62-devel

Installing prerequisites on OpenBSD::

    doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
                 libxslt

To install the synapse homeserver run::

    virtualenv -p python2.7 ~/.synapse
    source ~/.synapse/bin/activate
    pip install --upgrade pip
    pip install --upgrade setuptools
    pip install https://github.com/matrix-org/synapse/tarball/master

@@ -144,38 +154,74 @@ This installs synapse, along with the libraries it uses, into a virtual
environment under ``~/.synapse``. Feel free to pick a different directory
if you prefer.

In case of problems, please see the _Troubleshooting section below.
In case of problems, please see the _`Troubleshooting` section below.

Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.

Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
for details.

To set up your homeserver, run (in your virtualenv, as before)::
Configuring synapse
-------------------

Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before)::

    cd ~/.synapse
    python -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --server-name my.domain.name \
        --config-path homeserver.yaml \
        --generate-config \
        --report-stats=[yes|no]

...substituting your host and domain name as appropriate.
... substituting an appropriate value for ``--server-name``. The server name
determines the "domain" part of user-ids for users on your server: these will
all be of the format ``@user:my.domain.name``. It also determines how other
matrix servers will reach yours for `Federation`_. For a test configuration,
set this to the hostname of your server. For a more production-ready setup, you
will probably want to specify your domain (``example.com``) rather than a
matrix-specific hostname here (in the same way that your email address is
probably ``user@example.com`` rather than ``user@email.example.com``) - but
doing so may require more advanced setup - see `Setting up
Federation`_. Beware that the server name cannot be changed later.

This will generate you a config file that you can then customise, but it will
This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your Home Server to
identify itself to other Home Servers, so don't lose or delete them. It would be
wise to back them up somewhere safe. If, for whatever reason, you do need to
wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your Home Server's keys, you may find that other Home Servers have the
old key cached. If you update the signing key, you should change the name of the
key in the <server name>.signing.key file (the second word) to something different.
key in the ``<server name>.signing.key`` file (the second word) to something
different. See `the spec`__ for more information on key management.)

By default, registration of new users is disabled. You can either enable
registration in the config by specifying ``enable_registration: true``
(it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
you can use the command line to register new users::
.. __: `key_management`_

The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
configured without TLS; it is not recommended this be exposed outside your
local network. Port 8448 is configured to use TLS with a self-signed
certificate. This is fine for testing with but, to avoid your clients
complaining about the certificate, you will almost certainly want to use
another certificate for production purposes. (Note that a self-signed
certificate is fine for `Federation`_). You can do so by changing
``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
to read `Using a reverse proxy with Synapse`_ when doing so.

Apart from port 8448 using TLS, both ports are the same in the default
configuration.

Registering a user
------------------

You will need at least one user on your server in order to use a Matrix
client. Users can be registered either `via a Matrix client`__, or via a
commandline script.

.. __: `client-user-reg`_

To get started, it is easiest to use the command line to register new users::

    $ source ~/.synapse/bin/activate
    $ synctl start # if not already running
@@ -183,10 +229,22 @@ you can use the command line to register new users::
    New user localpart: erikj
    Password:
    Confirm password:
    Make admin [no]:
    Success!

This process uses a setting ``registration_shared_secret`` in
``homeserver.yaml``, which is shared between Synapse itself and the
``register_new_matrix_user`` script. It doesn't matter what it is (a random
value is generated by ``--generate-config``), but it should be kept secret, as
anyone with knowledge of it can register users on your server even if
``enable_registration`` is ``false``.

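(The command elided by the hunk boundary above is the registration script
itself; a typical invocation, assuming the default ports, looks something
like::

    register_new_matrix_user -c homeserver.yaml https://localhost:8448

which then prompts for the localpart, password and admin flag as shown.)
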
Setting up a TURN server
------------------------

For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See docs/turn-howto.rst for details.
a TURN server. See `<docs/turn-howto.rst>`_ for details.


Running Synapse
===============

@@ -198,29 +256,66 @@ run (e.g. ``~/.synapse``), and::

    source ./bin/activate
    synctl start

Using PostgreSQL
================

As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
traditionally used for convenience and simplicity.
Connecting to Synapse from a client
===================================

The advantages of Postgres include:
The easiest way to try out your new Synapse installation is by connecting to it
from a web client. The easiest option is probably the one at
http://riot.im/app. You will need to specify a "Custom server" when you log on
or register: set this to ``https://localhost:8448`` - remember to specify the
port (``:8448``) unless you changed the configuration. (Leave the identity
server as the default - see `Identity servers`_.)

* significant performance improvements due to the superior threading and
  caching model, smarter query optimiser
* allowing the DB to be run on separate hardware
* allowing basic active/backup high-availability with a "hot spare" synapse
  pointing at the same DB master, as well as enabling DB replication in
  synapse itself.
If all goes well you should at least be able to log in, create a room, and
start sending messages.

The only disadvantage is that the code is relatively new as of April 2015 and
may have a few regressions relative to SQLite.
(The homeserver runs a web client by default at https://localhost:8448/, though
as of the time of writing it is somewhat outdated and not really recommended -
https://github.com/matrix-org/synapse/issues/1527).

For information on how to install and use PostgreSQL, please see
`docs/postgres.rst <docs/postgres.rst>`_.
.. _`client-user-reg`:

Platform Specific Instructions
Registering a new user from a client
------------------------------------

By default, registration of new users via Matrix clients is disabled. To enable
it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)

Once ``enable_registration`` is set to ``true``, it is possible to register a
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.

Your new user name will be formed partly from the ``server_name`` (see
`Configuring synapse`_), and partly from a localpart you specify when you
create the account. Your name will take the form of::

    @localpart:my.domain.name

(pronounced "at localpart on my dot domain dot name").

As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.


Security Note
=============

Matrix serves raw user generated data in some APIs - specifically the `content
repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.

Whilst we have tried to mitigate against possible XSS attacks (e.g.
https://github.com/matrix-org/synapse/pull/1021) we recommend running
matrix homeservers on a dedicated domain name, to limit any malicious user generated
content served to web browsers via a matrix API from being able to attack webapps hosted
on the same domain. This is particularly true of sharing a matrix webclient and
server on the same domain.

See https://github.com/vector-im/vector-web/issues/1977 and
https://developer.github.com/changes/2014-04-25-user-content-security for more details.


Platform-Specific Instructions
==============================

Debian
@@ -228,7 +323,7 @@ Debian

Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)
https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)

Fedora
------

@@ -239,9 +334,8 @@ https://obs.infoserver.lv/project/monitor/matrix-synapse

ArchLinux
---------

The quickest way to get up and running with ArchLinux is probably with Ivan
Shapovalov's AUR package from
https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
The quickest way to get up and running with ArchLinux is probably with the community package
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in all
the necessary dependencies.

Alternatively, to install using pip a few changes may be needed as ArchLinux
@@ -282,6 +376,32 @@ Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Mo

- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
- Packages: ``pkg install py27-matrix-synapse``


OpenBSD
-------

There is currently no port for OpenBSD. Additionally, OpenBSD's security
settings require a slightly more difficult installation process.

1) Create a new directory in ``/usr/local`` called ``_synapse``. Also, create a
   new user called ``_synapse`` and set that directory as the new user's home.
   This is required because, by default, OpenBSD only allows binaries which need
   write and execute permissions on the same memory space to be run from
   ``/usr/local``.
2) ``su`` to the new ``_synapse`` user and change to their home directory.
3) Create a new virtualenv: ``virtualenv -p python2.7 ~/.synapse``
4) Source the virtualenv configuration located at
   ``/usr/local/_synapse/.synapse/bin/activate``. This is done in ``ksh`` by
   using the ``.`` command, rather than ``bash``'s ``source``.
5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
   webpages for their titles.
6) Use ``pip`` to install this repository: ``pip install
   https://github.com/matrix-org/synapse/tarball/master``
7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
   chance of a compromised Synapse server being used to take over your box.

After this, you may proceed with the rest of the install directions.

NixOS
-----

@@ -321,6 +441,7 @@ Troubleshooting:
you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``


Troubleshooting
===============

@@ -394,37 +515,6 @@ you will need to explicitly call Python2.7 - either running as::

...or by editing synctl with the correct python executable.

Synapse Development
===================

To check out a synapse for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies, that are easiest
to install using pip and a virtualenv::

    virtualenv env
    source env/bin/activate
    python synapse/python_dependencies.py | xargs -n1 pip install
    pip install setuptools_trial mock

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

Once this is done, you may wish to run Synapse's unit tests, to
check that everything is installed as it should be::

    python setup.py test

This should end with a 'PASSED' result::

    Ran 143 tests in 0.601s

    PASSED (successes=143)


Upgrading an existing Synapse
=============================

@@ -435,127 +525,252 @@ versions of synapse.

.. _UPGRADE.rst: UPGRADE.rst

.. _federation:

Setting up Federation
=====================

In order for other homeservers to send messages to your server, it will need to
be publicly visible on the internet, and they will need to know its host name.
You have two choices here, which will influence the form of your Matrix user
IDs:
Federation is the process by which users on different servers can participate
in the same room. For this to work, those other servers must be able to contact
yours to send messages.

1) Use the machine's own hostname as available on public DNS in the form of
   its A or AAAA records. This is easier to set up initially, perhaps for
   testing, but lacks the flexibility of SRV.
As explained in `Configuring synapse`_, the ``server_name`` in your
``homeserver.yaml`` file determines the way that other servers will reach
yours. By default, they will treat it as a hostname and try to connect to
port 8448. This is easy to set up and will work with the default configuration,
provided you set the ``server_name`` to match your machine's public DNS
hostname.

2) Set up a SRV record for your domain name. This requires you create a SRV
   record in DNS, but gives the flexibility to run the server on your own
   choice of TCP port, on a machine that might not be the same name as the
   domain name.
For a more flexible configuration, you can set up a DNS SRV record. This allows
you to run your server on a machine that might not have the same name as your
domain name. For example, you might want to run your server at
``synapse.example.com``, but have your Matrix user-ids look like
``@user:example.com``. (A SRV record also allows you to change the port from
the default 8448. However, if you are thinking of using a reverse-proxy, be
sure to read `Reverse-proxying the federation port`_ first.)

For the first form, simply pass the required hostname (of the machine) as the
--server-name parameter::
To use a SRV record, first create your SRV record and publish it in DNS. This
should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
<synapse.server.name>``. The DNS record should then look something like::

    $ dig -t srv _matrix._tcp.example.com
    _matrix._tcp.example.com. 3600    IN      SRV     10 0 8448 synapse.example.com.

You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
its user-ids, by setting ``server_name``::

    python -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --server-name <yourdomain.com> \
        --config-path homeserver.yaml \
        --generate-config
    python -m synapse.app.homeserver --config-path homeserver.yaml

Alternatively, you can run ``synctl start`` to guide you through the process.

For the second form, first create your SRV record and publish it in DNS. This
needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
and port where the server is running. (At the current time synapse does not
support clustering multiple servers into a single logical homeserver). The DNS
record would then look something like::

    $ dig -t srv _matrix._tcp.machine.my.domain.name
    _matrix._tcp    IN      SRV     10 0 8448 machine.my.domain.name.

At this point, you should then run the homeserver with the hostname of this
SRV record, as that is the name other machines will expect it to have::

    python -m synapse.app.homeserver \
        --server-name YOURDOMAIN \
        --config-path homeserver.yaml \
        --generate-config
    python -m synapse.app.homeserver --config-path homeserver.yaml

If you've already generated the config file, you need to edit the "server_name"
in you ```homeserver.yaml``` file. If you've already started Synapse and a
If you've already generated the config file, you need to edit the ``server_name``
in your ``homeserver.yaml`` file. If you've already started Synapse and a
database has been created, you will have to recreate the database.

You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.
If all goes well, you should be able to `connect to your server with a client`__,
and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
step. "Matrix HQ"'s sheer size and activity level tends to make even the
largest boxes pause for thought.)

.. __: `Connecting to Synapse from a client`_

Troubleshooting
---------------
The typical failure mode with federation is that when you try to join a room,
it is rejected with "401: Unauthorized". Generally this means that other
servers in the room couldn't access yours. (Joining a room over federation is a
complicated dance which requires connections in both directions).

So, things to check are:

* If you are trying to use a reverse-proxy, read `Reverse-proxying the
  federation port`_.
* If you are not using a SRV record, check that your ``server_name`` (the part
  of your user-id after the ``:``) matches your hostname, and that port 8448 on
  that hostname is reachable from outside your network (see the quick check
  after this list).
* If you *are* using a SRV record, check that it matches your ``server_name``
  (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
  it specifies are reachable from outside your network.

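One quick reachability check, using the federation ``/version`` API mentioned
earlier in this changelog (``example.com`` is a placeholder for your
``server_name``, and ``-k`` skips verification of the self-signed
certificate)::

    curl -k https://example.com:8448/_matrix/federation/v1/version

If this returns a small JSON blob when run from outside your network, port
8448 is reachable.
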
Running a Demo Federation of Synapses
|
||||
-------------------------------------
|
||||
|
||||
If you want to get up and running quickly with a trio of homeservers in a
|
||||
private federation (``localhost:8080``, ``localhost:8081`` and
|
||||
``localhost:8082``) which you can then access through the webclient running at
|
||||
http://localhost:8080. Simply run::
|
||||
|
||||
demo/start.sh
|
||||
|
||||
This is mainly useful just for development purposes.
|
||||
|
||||
Running The Demo Web Client
|
||||
===========================
|
||||
|
||||
The homeserver runs a web client by default at https://localhost:8448/.
|
||||
|
||||
If this is the first time you have used the client from that browser (it uses
|
||||
HTML5 local storage to remember its config), you will need to log in to your
|
||||
account. If you don't yet have an account, because you've just started the
|
||||
homeserver for the first time, then you'll need to register one.
|
||||
private federation, there is a script in the ``demo`` directory. This is mainly
|
||||
useful just for development purposes. See `<demo/README>`_.
|
||||
|
||||
|
||||
Registering A New Account
|
||||
-------------------------
|
||||
Using PostgreSQL
|
||||
================
|
||||
|
||||
Your new user name will be formed partly from the hostname your server is
|
||||
running as, and partly from a localpart you specify when you create the
|
||||
account. Your name will take the form of::
|
||||
As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
|
||||
alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
|
||||
traditionally used for convenience and simplicity.
|
||||
|
||||
@localpart:my.domain.here
|
||||
(pronounced "at localpart on my dot domain dot here")
|
||||
The advantages of Postgres include:
|
||||
|
||||
Specify your desired localpart in the topmost box of the "Register for an
|
||||
account" form, and click the "Register" button. Hostnames can contain ports if
|
||||
required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
|
||||
internal synapse sandbox running on localhost).
|
||||
* significant performance improvements due to the superior threading and
|
||||
caching model, smarter query optimiser
|
||||
* allowing the DB to be run on separate hardware
|
||||
* allowing basic active/backup high-availability with a "hot spare" synapse
|
||||
pointing at the same DB master, as well as enabling DB replication in
|
||||
synapse itself.
|
||||
|
||||
If registration fails, you may need to enable it in the homeserver (see
|
||||
`Synapse Installation`_ above)
|
||||
For information on how to install and use PostgreSQL, please see
|
||||
`docs/postgres.rst <docs/postgres.rst>`_.
|
||||
|
||||
|
||||
Logging In To An Existing Account
|
||||
---------------------------------
|
||||
.. _reverse-proxy:
|
||||
|
||||
Using a reverse proxy with Synapse
|
||||
==================================
|
||||
|
||||
It is possible to put a reverse proxy such as
|
||||
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
||||
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
|
||||
`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||
doing so is that it means that you can expose the default https port (443) to
|
||||
Matrix clients without needing to run Synapse with root privileges.
|
||||
|
||||
The most important thing to know here is that Matrix clients and other Matrix
|
||||
servers do not necessarily need to connect to your server via the same
|
||||
port. Indeed, clients will use port 443 by default, whereas servers default to
|
||||
port 8448. Where these are different, we refer to the 'client port' and the
|
||||
'federation port'.
|
||||
|
||||
The next most important thing to know is that using a reverse-proxy on the
|
||||
federation port has a number of pitfalls. It is possible, but be sure to read
|
||||
`Reverse-proxying the federation port`_.
|
||||
|
||||
The recommended setup is therefore to configure your reverse-proxy on port 443
|
||||
for client connections, but to also expose port 8448 for server-server
|
||||
connections. All the Matrix endpoints begin ``/_matrix``, so an example nginx
|
||||
configuration might look like::
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
listen [::]:443 ssl;
|
||||
server_name matrix.example.com;
|
||||
|
||||
location /_matrix {
|
||||
proxy_pass http://localhost:8008;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
}
|
||||
}
|
||||
|
||||
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
|
||||
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
|
||||
recorded correctly.
|
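As a rough sketch (adapt this to your existing configuration; the resource
names shown are illustrative), the corresponding listener entry might look
like::

    listeners:
      - port: 8008
        tls: false
        bind_addresses: ['127.0.0.1']
        type: http
        x_forwarded: true
        resources:
          - names: [client]
            compress: true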
||||
|
||||
Having done so, you can then use ``https://matrix.example.com`` (instead of
|
||||
``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
|
||||
Synapse from a client`_.
|
||||
|
||||
Reverse-proxying the federation port
|
||||
------------------------------------
|
||||
|
||||
There are two issues to consider before using a reverse-proxy on the federation
|
||||
port:
|
||||
|
||||
* Due to the way SSL certificates are managed in the Matrix federation protocol
|
||||
(see `spec`__), Synapse needs to be configured with the path to the SSL
|
||||
certificate, *even if you do not terminate SSL at Synapse*.
|
||||
|
||||
.. __: `key_management`_
|
||||
|
||||
* Synapse does not currently support SNI on the federation protocol
|
||||
(`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_), which
|
||||
means that using name-based virtual hosting is unreliable.
|
||||
|
||||
Furthermore, a number of the normal reasons for using a reverse-proxy do not
|
||||
apply:
|
||||
|
||||
* Other servers will connect on port 8448 by default, so there is no need to
|
||||
listen on port 443 (for federation, at least), which avoids the need for root
|
||||
privileges and virtual hosting.
|
||||
|
||||
* A self-signed SSL certificate is fine for federation, so there is no need to
|
||||
automate renewals. (The certificate generated by ``--generate-config`` is
|
||||
valid for 10 years.)
|
||||
|
||||
If you want to set up a reverse-proxy on the federation port despite these
|
||||
caveats, you will need to do the following:
|
||||
|
||||
* In ``homeserver.yaml``, set ``tls_certificate_path`` to the path to the SSL
|
||||
certificate file used by your reverse-proxy, and set ``no_tls`` to ``True``.
|
||||
(``tls_private_key_path`` will be ignored if ``no_tls`` is ``True``.)
|
||||
|
||||
* In your reverse-proxy configuration:
|
||||
|
||||
* If there are other virtual hosts on the same port, make sure that the
|
||||
*default* one uses the certificate configured above.
|
||||
|
||||
* Forward ``/_matrix`` to Synapse.
|
||||
|
||||
* If your reverse-proxy is not listening on port 8448, publish a SRV record to
|
||||
tell other servers how to find you. See `Setting up Federation`_.
|
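As a sketch, the ``homeserver.yaml`` changes described in the first point above
might look like this (the certificate path is just an example)::

    tls_certificate_path: "/etc/nginx/ssl/matrix.example.com.crt"
    no_tls: True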
||||
|
||||
When updating the SSL certificate, just update the file pointed to by
|
||||
``tls_certificate_path``: there is no need to restart synapse. (You may like to
|
||||
use a symbolic link to help make this process atomic.)
|
||||
|
||||
The most common mistake when setting up federation is not to tell Synapse about
|
||||
your SSL certificate. To check it, you can visit
|
||||
``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``.
|
||||
Unfortunately, there is no UI for this yet, but you should see
|
||||
``"MatchingTLSFingerprint": true``. If not, check that
|
||||
``Certificates[0].SHA256Fingerprint`` (the fingerprint of the certificate
|
||||
presented by your reverse-proxy) matches ``Keys.tls_fingerprints[0].sha256``
|
||||
(the fingerprint of the certificate Synapse is using).
|
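For example, assuming ``curl`` is available (replace ``example.com`` with your
``server_name``)::

    curl -s "https://matrix.org/federationtester/api/report?server_name=example.com" \
        | grep MatchingTLSFingerprint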
||||
|
||||
Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
|
||||
the form and click the Login button.
|
||||
|
||||
Identity Servers
|
||||
================
|
||||
|
||||
The job of authenticating 3PIDs and tracking which 3PIDs are associated with a
|
||||
given Matrix user is very security-sensitive, as there is obvious risk of spam
|
||||
if it is too easy to sign up for Matrix accounts or harvest 3PID data.
|
||||
Meanwhile the job of publishing the end-to-end encryption public keys for
|
||||
Matrix users is also very security-sensitive for similar reasons.
|
||||
Identity servers have the job of mapping email addresses and other 3rd Party
|
||||
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
|
||||
before creating that mapping.
|
||||
|
||||
Therefore the role of managing trusted identity in the Matrix ecosystem is
|
||||
farmed out to a cluster of known trusted ecosystem partners, who run 'Matrix
|
||||
Identity Servers' such as ``sydent``, whose role is purely to authenticate and
|
||||
track 3PID logins and publish end-user public keys.
|
||||
**They are not where accounts or credentials are stored - these live on home
|
||||
servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**
|
||||
|
||||
This process is very security-sensitive, as there is obvious risk of spam if it
|
||||
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
|
||||
term, we hope to create a decentralised system to manage it (`matrix-doc #712
|
||||
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
|
||||
the role of managing trusted identity in the Matrix ecosystem is farmed out to
|
||||
a cluster of known trusted ecosystem partners, who run 'Matrix Identity
|
||||
Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
|
||||
is purely to authenticate and track 3PID logins and publish end-user public
|
||||
keys.
|
||||
|
||||
You can host your own copy of Sydent, but this will prevent you reaching other
|
||||
users in the Matrix ecosystem via their email address, and prevent them finding
|
||||
you. We therefore recommend that you use one of the centralised identity servers
|
||||
at ``https://matrix.org`` or ``https://vector.im`` for now.
|
||||
|
||||
To reiterate: the Identity server will only be used if you choose to associate
|
||||
an email address with your account, or send an invite to another user via their
|
||||
email address.
|
||||
|
||||
|
||||
URL Previews
|
||||
============
|
||||
|
||||
Synapse 0.15.0 introduces a new API for previewing URLs at
|
||||
``/_matrix/media/r0/preview_url``. This is disabled by default. To turn it on
|
||||
you must enable the ``url_preview_enabled: True`` config parameter and
|
||||
explicitly specify the IP ranges that Synapse is not allowed to spider for
|
||||
previewing in the ``url_preview_ip_range_blacklist`` configuration parameter.
|
||||
This is critical from a security perspective to stop arbitrary Matrix users
|
||||
spidering 'internal' URLs on your network. At the very least we recommend that
|
||||
your loopback and RFC1918 IP addresses are blacklisted.
|
||||
|
||||
This also requires the optional lxml and netaddr python dependencies to be
|
||||
installed.
|
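A minimal sketch of the relevant ``homeserver.yaml`` settings (the blacklist
below only covers loopback and the RFC1918 ranges; extend it to suit your
network)::

    url_preview_enabled: True
    url_preview_ip_range_blacklist:
      - '127.0.0.0/8'
      - '10.0.0.0/8'
      - '172.16.0.0/12'
      - '192.168.0.0/16'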
||||
|
||||
It's currently early days for identity servers as Matrix is not yet using 3PIDs
|
||||
as the primary means of identity and E2E encryption is not complete. As such,
|
||||
we are running a single identity server (https://matrix.org) at the current
|
||||
time.
|
||||
|
||||
Password reset
|
||||
==============
|
||||
@@ -565,24 +780,54 @@ server, they can request a password-reset token via clients such as Vector.
|
||||
|
||||
A manual password reset can be done via direct database access as follows.
|
||||
|
||||
First calculate the hash of the new password:
|
||||
First calculate the hash of the new password::
|
||||
|
||||
$ source ~/.synapse/bin/activate
|
||||
$ ./scripts/hash_password
|
||||
Password:
|
||||
Confirm password:
|
||||
Password:
|
||||
Confirm password:
|
||||
$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
|
||||
Then update the `users` table in the database:
|
||||
Then update the `users` table in the database::
|
||||
|
||||
UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
|
||||
WHERE name='@test:test.com';
|
||||
|
||||
Where's the spec?!
|
||||
==================
|
||||
|
||||
The source of the matrix spec lives at https://github.com/matrix-org/matrix-doc.
|
||||
A recent HTML snapshot of this lives at http://matrix.org/docs/spec
|
||||
Synapse Development
|
||||
===================
|
||||
|
||||
Before setting up a development environment for synapse, make sure you have the
|
||||
system dependencies (such as the python header files) installed - see
|
||||
`Installing from source`_.
|
||||
|
||||
To check out a synapse for development, clone the git repo into a working
|
||||
directory of your choice::
|
||||
|
||||
git clone https://github.com/matrix-org/synapse.git
|
||||
cd synapse
|
||||
|
||||
Synapse has a number of external dependencies, that are easiest
|
||||
to install using pip and a virtualenv::
|
||||
|
||||
virtualenv -p python2.7 env
|
||||
source env/bin/activate
|
||||
python synapse/python_dependencies.py | xargs pip install
|
||||
pip install lxml mock
|
||||
|
||||
This will run a process of downloading and installing all the needed
|
||||
dependencies into a virtual env.
|
||||
|
||||
Once this is done, you may wish to run Synapse's unit tests, to
|
||||
check that everything is installed as it should be::
|
||||
|
||||
PYTHONPATH="." trial tests
|
||||
|
||||
This should end with a 'PASSED' result::
|
||||
|
||||
Ran 143 tests in 0.601s
|
||||
|
||||
PASSED (successes=143)
|
||||
|
||||
|
||||
Building Internal API Documentation
|
||||
@@ -599,8 +844,7 @@ Building internal API documentation::
|
||||
python setup.py build_sphinx
|
||||
|
||||
|
||||
|
||||
Halp!! Synapse eats all my RAM!
|
||||
Help!! Synapse eats all my RAM!
|
||||
===============================
|
||||
|
||||
Synapse's architecture is quite RAM hungry currently - we deliberately
|
||||
@@ -615,3 +859,5 @@ around a ~700MB footprint. You can dial it down further to 0.02 if
|
||||
desired, which targets roughly ~512MB. Conversely you can dial it up if
|
||||
you need performance for lots of users and have a box with a lot of RAM.
|
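The knob for this is typically the ``SYNAPSE_CACHE_FACTOR`` environment
variable (check your start script or unit file to confirm this applies to your
setup); for example::

    SYNAPSE_CACHE_FACTOR=0.02 synctl start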
||||
|
||||
|
||||
.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
|
||||
|
||||
10  UPGRADE.rst
@@ -27,7 +27,15 @@ running:
|
||||
# Pull the latest version of the master branch.
|
||||
git pull
|
||||
# Update the versions of synapse's python dependencies.
|
||||
python synapse/python_dependencies.py | xargs -n1 pip install
|
||||
python synapse/python_dependencies.py | xargs -n1 pip install --upgrade
|
||||
|
||||
|
||||
Upgrading to v0.15.0
|
||||
====================
|
||||
|
||||
If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.
|
||||
|
||||
|
||||
Upgrading to v0.11.0
|
||||
|
||||
@@ -32,7 +32,7 @@ import urlparse
|
||||
import nacl.signing
|
||||
import nacl.encoding
|
||||
|
||||
from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
|
||||
from signedjson.sign import verify_signed_json, SignatureVerifyException
|
||||
|
||||
CONFIG_JSON = "cmdclient_config.json"
|
||||
|
||||
|
||||
50  contrib/example_log_config.yaml  (new file)
@@ -0,0 +1,50 @@
|
||||
# Example log_config file for synapse. To enable, point `log_config` to it in
|
||||
# `homeserver.yaml`, and restart synapse.
|
||||
#
|
||||
# This configuration will produce similar results to the defaults within
|
||||
# synapse, but can be edited to give more flexibility.
|
||||
|
||||
version: 1
|
||||
|
||||
formatters:
|
||||
fmt:
|
||||
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
|
||||
|
||||
filters:
|
||||
context:
|
||||
(): synapse.util.logcontext.LoggingContextFilter
|
||||
request: ""
|
||||
|
||||
handlers:
|
||||
# example output to console
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
filters: [context]
|
||||
|
||||
# example output to file - to enable, edit 'root' config below.
|
||||
file:
|
||||
class: logging.handlers.RotatingFileHandler
|
||||
formatter: fmt
|
||||
filename: /var/log/synapse/homeserver.log
|
||||
maxBytes: 100000000
|
||||
backupCount: 3
|
||||
filters: [context]
|
||||
|
||||
|
||||
root:
|
||||
level: INFO
|
||||
handlers: [console] # to use file handler instead, switch to [file]
|
||||
|
||||
loggers:
|
||||
synapse:
|
||||
level: INFO
|
||||
|
||||
synapse.storage.SQL:
|
||||
# beware: increasing this to DEBUG will make synapse log sensitive
|
||||
# information such as access tokens.
|
||||
level: INFO
|
||||
|
||||
# example of enabling debugging for a component:
|
||||
#
|
||||
# synapse.federation.transport.server:
|
||||
# level: DEBUG
|
||||
@@ -1,5 +1,5 @@
|
||||
# This assumes that Synapse has been installed as a system package
|
||||
# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
|
||||
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
|
||||
# rather than in a user home directory or similar under virtualenv.
|
||||
|
||||
[Unit]
|
||||
@@ -9,6 +9,7 @@ Description=Synapse Matrix homeserver
|
||||
Type=simple
|
||||
User=synapse
|
||||
Group=synapse
|
||||
EnvironmentFile=-/etc/sysconfig/synapse
|
||||
WorkingDirectory=/var/lib/synapse
|
||||
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
|
||||
|
||||
|
||||
@@ -10,13 +10,13 @@ https://developers.google.com/recaptcha/
|
||||
|
||||
Setting ReCaptcha Keys
|
||||
----------------------
|
||||
The keys are a config option on the home server config. If they are not
|
||||
visible, you can generate them via --generate-config. Set the following value:
|
||||
The keys are a config option on the home server config. If they are not
|
||||
visible, you can generate them via --generate-config. Set the following value::
|
||||
|
||||
recaptcha_public_key: YOUR_PUBLIC_KEY
|
||||
recaptcha_private_key: YOUR_PRIVATE_KEY
|
||||
|
||||
In addition, you MUST enable captchas via:
|
||||
|
||||
In addition, you MUST enable captchas via::
|
||||
|
||||
enable_registration_captcha: true
|
||||
|
||||
@@ -25,7 +25,5 @@ Configuring IP used for auth
|
||||
The ReCaptcha API requires that the IP address of the user who solved the
|
||||
captcha is sent. If the client is connecting through a proxy or load balancer,
|
||||
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
|
||||
IP address. This can be configured as an option on the home server like so:
|
||||
|
||||
captcha_ip_origin_is_x_forwarded: true
|
||||
|
||||
IP address. This can be configured using the x_forwarded directive in the
|
||||
listeners section of the homeserver.yaml configuration file.
|
||||
12  docs/admin_api/README.rst  (new file)
@@ -0,0 +1,12 @@
|
||||
Admin APIs
|
||||
==========
|
||||
|
||||
This directory includes documentation for the various synapse specific admin
|
||||
APIs available.
|
||||
|
||||
Only users that are server admins can use these APIs. A user can be marked as a
|
||||
server admin by updating the database directly, e.g.:
|
||||
|
||||
``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
|
||||
|
||||
Restarting may be required for the changes to register.
|
||||
15  docs/admin_api/purge_history_api.rst  (new file)
@@ -0,0 +1,15 @@
|
||||
Purge History API
|
||||
=================
|
||||
|
||||
The purge history API allows server admins to purge historic events from their
|
||||
database, reclaiming disk space.
|
||||
|
||||
Depending on the amount of history being purged a call to the API may take
|
||||
several minutes or longer. During this period users will not be able to
|
||||
paginate further back in the room from the point being purged from.
|
||||
|
||||
The API is simply:
|
||||
|
||||
``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
|
||||
|
||||
including an ``access_token`` of a server admin.
|
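For example, with ``curl`` (placeholders as in the API description above; an
empty JSON body is sent here as a safe default)::

    curl -X POST -H 'Content-Type: application/json' -d '{}' \
        'https://localhost:8448/_matrix/client/r0/admin/purge_history/<room_id>/<event_id>?access_token=<access_token>'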
||||
17  docs/admin_api/purge_remote_media.rst  (new file)
@@ -0,0 +1,17 @@
|
||||
Purge Remote Media API
|
||||
======================
|
||||
|
||||
The purge remote media API allows server admins to purge old cached remote
|
||||
media.
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
|
||||
|
||||
{}
|
||||
|
||||
Which will remove all cached media that was last accessed before
|
||||
``<unix_timestamp_in_ms>``.
|
||||
|
||||
If the user re-requests purged remote media, synapse will re-request the media
|
||||
from the originating server.
|
||||
@@ -32,5 +32,4 @@ The format of the AS configuration file is as follows:
|
||||
|
||||
See the spec_ for further details on how application services work.
|
||||
|
||||
.. _spec: https://github.com/matrix-org/matrix-doc/blob/master/specification/25_application_service_api.rst#application-service-api
|
||||
|
||||
.. _spec: https://matrix.org/docs/spec/application_service/unstable.html
|
||||
|
||||
@@ -43,7 +43,10 @@ Basically, PEP8
|
||||
together, or want to deliberately extend or preserve vertical/horizontal
|
||||
space)
|
||||
|
||||
Comments should follow the google code style. This is so that we can generate
|
||||
documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/)
|
||||
Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
|
||||
This is so that we can generate documentation with
|
||||
`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
|
||||
`examples <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
|
||||
in the sphinx documentation.
|
||||
|
||||
Code should pass pep8 --max-line-length=100 without any warnings.
|
||||
|
||||
446  docs/log_contexts.rst  (new file)
@@ -0,0 +1,446 @@
|
||||
Log contexts
|
||||
============
|
||||
|
||||
.. contents::
|
||||
|
||||
To help track the processing of individual requests, synapse uses a
|
||||
'log context' to track which request it is handling at any given moment. This
|
||||
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
|
||||
the information back out of the thread-local variable and add it to each log
|
||||
record.
|
||||
|
||||
Logcontexts are also used for CPU and database accounting, so that we can track
|
||||
which requests were responsible for high CPU use or database activity.
|
||||
|
||||
The ``synapse.util.logcontext`` module provides facilities for managing the
|
||||
current log context (as well as providing the ``LoggingContextFilter`` class).
|
||||
|
||||
Deferreds make the whole thing complicated, so this document describes how it
|
||||
all works, and how to write code which follows the rules.
|
||||
|
||||
Logcontexts without Deferreds
|
||||
-----------------------------
|
||||
|
||||
In the absence of any Deferred voodoo, things are simple enough. As with any
|
||||
code of this nature, the rule is that our function should leave things as it
|
||||
found them:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from synapse.util import logcontext # omitted from future snippets
|
||||
|
||||
def handle_request(request_id):
|
||||
request_context = logcontext.LoggingContext()
|
||||
|
||||
calling_context = logcontext.LoggingContext.current_context()
|
||||
logcontext.LoggingContext.set_current_context(request_context)
|
||||
try:
|
||||
request_context.request = request_id
|
||||
do_request_handling()
|
||||
logger.debug("finished")
|
||||
finally:
|
||||
logcontext.LoggingContext.set_current_context(calling_context)
|
||||
|
||||
def do_request_handling():
|
||||
logger.debug("phew") # this will be logged against request_id
|
||||
|
||||
|
||||
LoggingContext implements the context management methods, so the above can be
|
||||
written much more succinctly as:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def handle_request(request_id):
|
||||
with logcontext.LoggingContext() as request_context:
|
||||
request_context.request = request_id
|
||||
do_request_handling()
|
||||
logger.debug("finished")
|
||||
|
||||
def do_request_handling():
|
||||
logger.debug("phew")
|
||||
|
||||
|
||||
Using logcontexts with Deferreds
|
||||
--------------------------------
|
||||
|
||||
Deferreds — and in particular, ``defer.inlineCallbacks`` — break
|
||||
the linear flow of code so that there is no longer a single entry point where
|
||||
we should set the logcontext and a single exit point where we should remove it.
|
||||
|
||||
Consider the example above, where ``do_request_handling`` needs to do some
|
||||
blocking operation, and returns a deferred:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_request(request_id):
|
||||
with logcontext.LoggingContext() as request_context:
|
||||
request_context.request = request_id
|
||||
yield do_request_handling()
|
||||
logger.debug("finished")
|
||||
|
||||
|
||||
In the above flow:
|
||||
|
||||
* The logcontext is set
|
||||
* ``do_request_handling`` is called, and returns a deferred
|
||||
* ``handle_request`` yields the deferred
|
||||
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred
|
||||
|
||||
So we have stopped processing the request (and will probably go on to start
|
||||
processing the next), without clearing the logcontext.
|
||||
|
||||
To circumvent this problem, synapse code assumes that, wherever you have a
|
||||
deferred, you will want to yield on it. To that end, wherever functions return
|
||||
a deferred, we adopt the following conventions:
|
||||
|
||||
**Rules for functions returning deferreds:**
|
||||
|
||||
* If the deferred is already complete, the function returns with the same
|
||||
logcontext it started with.
|
||||
* If the deferred is incomplete, the function clears the logcontext before
|
||||
returning; when the deferred completes, it restores the logcontext before
|
||||
running any callbacks.
|
||||
|
||||
That sounds complicated, but actually it means a lot of code (including the
|
||||
example above) "just works". There are two cases:
|
||||
|
||||
* If ``do_request_handling`` returns a completed deferred, then the logcontext
|
||||
will still be in place. In this case, execution will continue immediately
|
||||
after the ``yield``; the "finished" line will be logged against the right
|
||||
context, and the ``with`` block restores the original context before we
|
||||
return to the caller.
|
||||
|
||||
* If the returned deferred is incomplete, ``do_request_handling`` clears the
|
||||
logcontext before returning. The logcontext is therefore clear when
|
||||
``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
|
||||
wrapper adds a callback to the deferred, and returns another (incomplete)
|
||||
deferred to the caller, and it is safe to begin processing the next request.
|
||||
|
||||
Once ``do_request_handling``'s deferred completes, it will reinstate the
|
||||
logcontext, before running the callback added by the ``inlineCallbacks``
|
||||
wrapper. That callback runs the second half of ``handle_request``, so again
|
||||
the "finished" line will be logged against the right
|
||||
context, and the ``with`` block restores the original context.
|
||||
|
||||
As an aside, it's worth noting that ``handle_request`` follows our rules -
|
||||
though that only matters if the caller has its own logcontext which it cares
|
||||
about.
|
||||
|
||||
The following sections describe pitfalls and helpful patterns when implementing
|
||||
these rules.
|
||||
|
||||
Always yield your deferreds
|
||||
---------------------------
|
||||
|
||||
Whenever you get a deferred back from a function, you should ``yield`` on it
|
||||
as soon as possible. (Returning it directly to your caller is ok too, if you're
|
||||
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
|
||||
call any other functions.
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def fun():
|
||||
logger.debug("starting")
|
||||
yield do_some_stuff() # just like this
|
||||
|
||||
d = more_stuff()
|
||||
result = yield d # also fine, of course
|
||||
|
||||
defer.returnValue(result)
|
||||
|
||||
def nonInlineCallbacksFun():
|
||||
logger.debug("just a wrapper really")
|
||||
return do_some_stuff() # this is ok too - the caller will yield on
|
||||
# it anyway.
|
||||
|
||||
Provided this pattern is followed all the way back up the callchain to where
|
||||
the logcontext was set, this will make things work out ok: provided
|
||||
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
|
||||
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.
|
||||
|
||||
It's all too easy to forget to ``yield``: for instance if we forgot that
|
||||
``do_some_stuff`` returned a deferred, we might plough on regardless. This
|
||||
leads to a mess; it will probably work itself out eventually, but not before
|
||||
a load of stuff has been logged against the wrong context. (Normally, other
|
||||
things will break, more obviously, if you forget to ``yield``, so this tends
|
||||
not to be a major problem in practice.)
|
||||
|
||||
Of course sometimes you need to do something a bit fancier with your Deferreds
|
||||
- not all code follows the linear A-then-B-then-C pattern. Notes on
|
||||
implementing more complex patterns are in later sections.
|
||||
|
||||
Where you create a new Deferred, make it follow the rules
|
||||
---------------------------------------------------------
|
||||
|
||||
Most of the time, a Deferred comes from another synapse function. Sometimes,
|
||||
though, we need to make up a new Deferred, or we get a Deferred back from
|
||||
external code. We need to make it follow our rules.
|
||||
|
||||
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
|
||||
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
|
||||
which returns a deferred which will run its callbacks after a given number of
|
||||
seconds. That might look like:
|
||||
|
||||
.. code:: python
|
||||
|
||||
# not a logcontext-rules-compliant function
|
||||
def get_sleep_deferred(seconds):
|
||||
d = defer.Deferred()
|
||||
reactor.callLater(seconds, d.callback, None)
|
||||
return d
|
||||
|
||||
That doesn't follow the rules, but we can fix it by wrapping it with
|
||||
``PreserveLoggingContext`` and ``yield``-ing on it:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def sleep(seconds):
|
||||
with PreserveLoggingContext():
|
||||
yield get_sleep_deferred(seconds)
|
||||
|
||||
This technique works equally for external functions which return deferreds,
|
||||
or deferreds we have made ourselves.
|
||||
|
||||
You can also use ``logcontext.make_deferred_yieldable``, which just does the
|
||||
boilerplate for you, so the above could be written:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def sleep(seconds):
|
||||
return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
|
||||
|
||||
|
||||
Fire-and-forget
|
||||
---------------
|
||||
|
||||
Sometimes you want to fire off a chain of execution, but not wait for its
|
||||
result. That might look a bit like this:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def do_request_handling():
|
||||
yield foreground_operation()
|
||||
|
||||
# *don't* do this
|
||||
background_operation()
|
||||
|
||||
logger.debug("Request handling complete")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def background_operation():
|
||||
yield first_background_step()
|
||||
logger.debug("Completed first step")
|
||||
yield second_background_step()
|
||||
logger.debug("Completed second step")
|
||||
|
||||
The above code does a couple of steps in the background after
|
||||
``do_request_handling`` has finished. The log lines are still logged against
|
||||
the ``request_context`` logcontext, which may or may not be desirable. There
|
||||
are two big problems with the above, however. The first problem is that, if
|
||||
``background_operation`` returns an incomplete Deferred, it will expect its
|
||||
caller to ``yield`` immediately, so will have cleared the logcontext. In this
|
||||
example, that means that 'Request handling complete' will be logged without any
|
||||
context.
|
||||
|
||||
The second problem, which is potentially even worse, is that when the Deferred
|
||||
returned by ``background_operation`` completes, it will restore the original
|
||||
logcontext. There is nothing waiting on that Deferred, so the logcontext will
|
||||
leak into the reactor and possibly get attached to some arbitrary future
|
||||
operation.
|
||||
|
||||
There are two potential solutions to this.
|
||||
|
||||
One option is to surround the call to ``background_operation`` with a
|
||||
``PreserveLoggingContext`` call. That will reset the logcontext before
|
||||
starting ``background_operation`` (so the context restored when the deferred
|
||||
completes will be the empty logcontext), and will restore the current
|
||||
logcontext before continuing the foreground process:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def do_request_handling():
|
||||
yield foreground_operation()
|
||||
|
||||
# start background_operation off in the empty logcontext, to
|
||||
# avoid leaking the current context into the reactor.
|
||||
with PreserveLoggingContext():
|
||||
background_operation()
|
||||
|
||||
# this will now be logged against the request context
|
||||
logger.debug("Request handling complete")
|
||||
|
||||
Obviously that option means that the operations done in
|
||||
``background_operation`` would not be logged against a logcontext (though
|
||||
that might be fixed by setting a different logcontext via a ``with
|
||||
LoggingContext(...)`` in ``background_operation``).
|
||||
|
||||
The second option is to use ``logcontext.preserve_fn``, which wraps a function
|
||||
so that it doesn't reset the logcontext even when it returns an incomplete
|
||||
deferred, and adds a callback to the returned deferred to reset the
|
||||
logcontext. In other words, it turns a function that follows the Synapse rules
|
||||
about logcontexts and Deferreds into one which behaves more like an external
|
||||
function — the opposite operation to that described in the previous section.
|
||||
It can be used like this:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def do_request_handling():
|
||||
yield foreground_operation()
|
||||
|
||||
logcontext.preserve_fn(background_operation)()
|
||||
|
||||
# this will now be logged against the request context
|
||||
logger.debug("Request handling complete")
|
||||
|
||||
XXX: I think ``preserve_context_over_fn`` is supposed to do the first option,
|
||||
but the fact that it does ``preserve_context_over_deferred`` on its results
|
||||
means that its use is fraught with difficulty.
|
||||
|
||||
Passing synapse deferreds into third-party functions
|
||||
----------------------------------------------------
|
||||
|
||||
A typical example of this is where we want to collect together two or more
|
||||
deferreds via ``defer.gatherResults``:
|
||||
|
||||
.. code:: python
|
||||
|
||||
d1 = operation1()
|
||||
d2 = operation2()
|
||||
d3 = defer.gatherResults([d1, d2])
|
||||
|
||||
This is really a variation of the fire-and-forget problem above, in that we are
|
||||
firing off ``d1`` and ``d2`` without yielding on them. The difference
|
||||
is that we now have third-party code attached to their callbacks. Either way, either
|
||||
technique given in the `Fire-and-forget`_ section will work.
|
||||
|
||||
Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
|
||||
in order to make it follow the logcontext rules before we can yield it, as
|
||||
described in `Where you create a new Deferred, make it follow the rules`_.
|
||||
|
||||
So, option one: reset the logcontext before starting the operations to be
|
||||
gathered:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def do_request_handling():
|
||||
with PreserveLoggingContext():
|
||||
d1 = operation1()
|
||||
d2 = operation2()
|
||||
result = yield defer.gatherResults([d1, d2])
|
||||
|
||||
In this case particularly, though, option two, of using
|
||||
``logcontext.preserve_fn`` almost certainly makes more sense, so that
|
||||
``operation1`` and ``operation2`` are both logged against the original
|
||||
logcontext. This looks like:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def do_request_handling():
|
||||
d1 = logcontext.preserve_fn(operation1)()
|
||||
d2 = logcontext.preserve_fn(operation2)()
|
||||
|
||||
with PreserveLoggingContext():
|
||||
result = yield defer.gatherResults([d1, d2])
|
||||
|
||||
|
||||
Was all this really necessary?
|
||||
------------------------------
|
||||
|
||||
The conventions used work fine for a linear flow where everything happens in
|
||||
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
|
||||
follow for any more exotic flows. It's hard not to wonder if we could have done
|
||||
something else.
|
||||
|
||||
We're not going to rewrite Synapse now, so the following is entirely of
|
||||
academic interest, but I'd like to record some thoughts on an alternative
|
||||
approach.
|
||||
|
||||
I briefly prototyped some code following an alternative set of rules. I think
|
||||
it would work, but I certainly didn't get as far as thinking how it would
|
||||
interact with concepts as complicated as the cache descriptors.
|
||||
|
||||
My alternative rules were:
|
||||
|
||||
* functions always preserve the logcontext of their caller, whether or not they
|
||||
are returning a Deferred.
|
||||
|
||||
* Deferreds returned by synapse functions run their callbacks in the same
|
||||
context as the function was originally called in.
|
||||
|
||||
The main point of this scheme is that everywhere that sets the logcontext is
|
||||
responsible for clearing it before returning control to the reactor.
|
||||
|
||||
So, for example, if you were the function which started a ``with
|
||||
LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
|
||||
off the background process, and then leave the ``with`` block to wait for it:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def handle_request(request_id):
|
||||
with logcontext.LoggingContext() as request_context:
|
||||
request_context.request = request_id
|
||||
d = do_request_handling()
|
||||
|
||||
def cb(r):
|
||||
logger.debug("finished")
|
||||
|
||||
d.addCallback(cb)
|
||||
return d
|
||||
|
||||
(in general, mixing ``with LoggingContext`` blocks and
|
||||
``defer.inlineCallbacks`` in the same function leads to slightly
|
||||
counter-intuitive code, under this scheme).
|
||||
|
||||
Because we leave the original ``with`` block as soon as the Deferred is
|
||||
returned (as opposed to waiting for it to be resolved, as we do today), the
|
||||
logcontext is cleared before control passes back to the reactor; so if there is
|
||||
some code within ``do_request_handling`` which needs to wait for a Deferred to
|
||||
complete, there is no need for it to worry about clearing the logcontext before
|
||||
doing so:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def handle_request():
|
||||
r = do_some_stuff()
|
||||
r.addCallback(do_some_more_stuff)
|
||||
return r
|
||||
|
||||
— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
|
||||
runs its callbacks in the original logcontext, all is happy.
|
||||
|
||||
The business of a Deferred which runs its callbacks in the original logcontext
|
||||
isn't hard to achieve — we have it today, in the shape of
|
||||
``logcontext._PreservingContextDeferred``:
|
||||
|
||||
.. code:: python
|
||||
|
||||
def do_some_stuff():
|
||||
deferred = do_some_io()
|
||||
pcd = _PreservingContextDeferred(LoggingContext.current_context())
|
||||
deferred.chainDeferred(pcd)
|
||||
return pcd
|
||||
|
||||
It turns out that, thanks to the way that Deferreds chain together, we
|
||||
automatically get the property of a context-preserving deferred with
|
||||
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
|
||||
on has that property. So we can just write:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_request():
|
||||
yield do_some_stuff()
|
||||
yield do_some_more_stuff()
|
||||
|
||||
To conclude: I think this scheme would have worked equally well, with less
|
||||
danger of messing it up, and probably made some more esoteric code easier to
|
||||
write. But again — changing the conventions of the entire Synapse codebase is
|
||||
not a sensible option for the marginal improvement offered.
|
||||
@@ -1,50 +1,69 @@
|
||||
How to monitor Synapse metrics using Prometheus
|
||||
===============================================
|
||||
|
||||
1: Install prometheus:
|
||||
Follow instructions at http://prometheus.io/docs/introduction/install/
|
||||
1. Install prometheus:
|
||||
|
||||
2: Enable synapse metrics:
|
||||
Simply setting a (local) port number will enable it. Pick a port.
|
||||
prometheus itself defaults to 9090, so starting just above that for
|
||||
locally monitored services seems reasonable. E.g. 9092:
|
||||
Follow instructions at http://prometheus.io/docs/introduction/install/
|
||||
|
||||
Add to homeserver.yaml
|
||||
2. Enable synapse metrics:
|
||||
|
||||
metrics_port: 9092
|
||||
Simply setting a (local) port number will enable it. Pick a port.
|
||||
prometheus itself defaults to 9090, so starting just above that for
|
||||
locally monitored services seems reasonable. E.g. 9092:
|
||||
|
||||
Restart synapse
|
||||
Add to homeserver.yaml::
|
||||
|
||||
3: Check out synapse-prometheus-config
|
||||
https://github.com/matrix-org/synapse-prometheus-config
|
||||
metrics_port: 9092
|
||||
|
||||
4: Add ``synapse.html`` and ``synapse.rules``
|
||||
The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
|
||||
and the ``.rules`` file needs to be invoked somewhere in the main config
|
||||
file. A symlink to each from the git checkout into the prometheus directory
|
||||
might be easiest to ensure ``git pull`` keeps it updated.
|
||||
Also ensure that ``enable_metrics`` is set to ``True``.
|
||||
|
||||
Restart synapse.
|
||||
|
||||
5: Add a prometheus target for synapse
|
||||
This is easiest if prometheus runs on the same machine as synapse, as it can
|
||||
then just use localhost::
|
||||
3. Add a prometheus target for synapse.
|
||||
|
||||
global: {
|
||||
rule_file: "synapse.rules"
|
||||
}
|
||||
It needs to set the ``metrics_path`` to a non-default value::
|
||||
|
||||
job: {
|
||||
name: "synapse"
|
||||
- job_name: "synapse"
|
||||
metrics_path: "/_synapse/metrics"
|
||||
static_configs:
|
||||
- targets:
|
||||
"my.server.here:9092"
|
||||
|
||||
target_group: {
|
||||
target: "http://localhost:9092/"
|
||||
}
|
||||
}
|
||||
If your prometheus is older than 1.5.2, you will need to replace
|
||||
``static_configs`` in the above with ``target_groups``.
|
||||
|
||||
Restart prometheus.
|
||||
|
||||
6: Start prometheus::
|
||||
Standard Metric Names
|
||||
---------------------
|
||||
|
||||
./prometheus -config.file=prometheus.conf
|
||||
As of synapse version 0.18.2, the format of the process-wide metrics has been
|
||||
changed to fit prometheus standard naming conventions. Additionally the units
|
||||
have been changed to seconds, from milliseconds.
|
||||
|
||||
7: Wait a few seconds for it to start and perform the first scrape,
|
||||
then visit the console:
|
||||
================================== =============================
|
||||
New name Old name
|
||||
---------------------------------- -----------------------------
|
||||
process_cpu_user_seconds_total process_resource_utime / 1000
|
||||
process_cpu_system_seconds_total process_resource_stime / 1000
|
||||
process_open_fds (no 'type' label) process_fds
|
||||
================================== =============================
|
||||
|
||||
http://server-where-prometheus-runs:9090/consoles/synapse.html
|
||||
The python-specific counts of garbage collector performance have been renamed.
|
||||
|
||||
=========================== ======================
|
||||
New name Old name
|
||||
--------------------------- ----------------------
|
||||
python_gc_time reactor_gc_time
|
||||
python_gc_unreachable_total reactor_gc_unreachable
|
||||
python_gc_counts reactor_gc_counts
|
||||
=========================== ======================
|
||||
|
||||
The twisted-specific reactor metrics have been renamed.
|
||||
|
||||
==================================== =====================
|
||||
New name Old name
|
||||
------------------------------------ ---------------------
|
||||
python_twisted_reactor_pending_calls reactor_pending_calls
|
||||
python_twisted_reactor_tick_time reactor_tick_time
|
||||
==================================== =====================
|
||||
|
||||
58  docs/replication.rst  (new file)
@@ -0,0 +1,58 @@
|
||||
Replication Architecture
|
||||
========================
|
||||
|
||||
Motivation
|
||||
----------
|
||||
|
||||
We'd like to be able to split some of the work that synapse does into multiple
|
||||
python processes. In theory multiple synapse processes could share a single
|
||||
postgresql database and we'd scale up by running more synapse processes.
|
||||
However much of synapse assumes that only one process is interacting with the
|
||||
database, both for assigning unique identifiers when inserting into tables,
|
||||
notifying components about new updates, and for invalidating its caches.
|
||||
|
||||
So running multiple copies of the current code isn't an option. One way to
|
||||
run multiple processes would be to have a single writer process and multiple
|
||||
reader processes connected to the same database. In order to do this we'd need
|
||||
a way for the reader process to invalidate its in-memory caches when an update
|
||||
happens on the writer. One way to do this is for the writer to present an
|
||||
append-only log of updates which the readers can consume to invalidate their
|
||||
caches and to push updates to listening clients or pushers.
|
||||
|
||||
Synapse already stores much of its data as an append-only log so that it can
|
||||
correctly respond to /sync requests so the amount of code changes needed to
|
||||
expose the append-only log to the readers should be fairly minimal.
|
||||
|
||||
Architecture
|
||||
------------
|
||||
|
||||
The Replication API
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Synapse will optionally expose a long poll HTTP API for extracting updates. The
|
||||
API will have a similar shape to /sync in that clients provide tokens
|
||||
indicating where in the log they have reached and a timeout. The synapse server
|
||||
then either responds with updates immediately if it already has updates or it
|
||||
waits until the timeout for more updates. If the timeout expires and nothing
|
||||
happened then the server returns an empty response.
|
||||
|
||||
However unlike the /sync API this replication API is returning synapse specific
|
||||
data rather than trying to implement a matrix specification. The replication
|
||||
results are returned as arrays of rows where the rows are mostly lifted
|
||||
directly from the database. This avoids unnecessary JSON parsing on the server
|
||||
and hopefully avoids an impedance mismatch between the data returned and the
|
||||
required updates to the datastore.
|
||||
|
||||
This does not replicate all the database tables as many of the database tables
|
||||
are indexes that can be recovered from the contents of other tables.
|
||||
|
||||
The format and parameters for the api are documented in
|
||||
``synapse/replication/resource.py``.
|
||||
|
||||
|
||||
The Slaved DataStore
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
There are read-only versions of the synapse storage layer in
|
||||
``synapse/replication/slave/storage`` that use the response of the replication
|
||||
API to invalidate their caches.
|
||||
177  docs/tcp_replication.rst  (new file)
@@ -0,0 +1,177 @@
|
||||
TCP Replication
|
||||
===============
|
||||
|
||||
This describes the TCP replication protocol that replaces the HTTP protocol.
|
||||
|
||||
Motivation
|
||||
----------
|
||||
|
||||
The HTTP API used long polling from the workers to the master; this has the
problem of causing a lot of duplicate work on the server. This TCP protocol
aims to solve that.
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
The protocol is based on fire and forget, line based commands. An example flow
|
||||
would be (where '>' indicates master->worker and '<' worker->master flows)::
|
||||
|
||||
> SERVER example.com
|
||||
< REPLICATE events 53
|
||||
> RDATA events 54 ["$foo1:bar.com", ...]
|
||||
> RDATA events 55 ["$foo4:bar.com", ...]
|
||||
|
||||
The example shows the server accepting a new connection and sending its identity
|
||||
with the ``SERVER`` command, followed by the client asking to subscribe to the
|
||||
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
|
||||
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
|
||||
format of ``<row>`` is defined by the individual streams.
|
||||
|
||||
Error reporting happens by either the client or server sending an `ERROR`
|
||||
command, and usually the connection will be closed.
|
||||
|
||||
|
||||
Since the protocol is simple and line based, it's possible to manually connect to
|
||||
the server using a tool like netcat. A few things should be noted when manually
|
||||
using the protocol:
|
||||
|
||||
* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
|
||||
be used to get all future updates. The special stream name ``ALL`` can be used
|
||||
with ``NOW`` to subscribe to all available streams.
|
||||
* The federation stream is only available if federation sending has been
|
||||
disabled on the main process.
|
||||
* The server will only time connections out that have sent a ``PING`` command.
|
||||
If a ping is sent then the connection will be closed if no further commands
|
||||
are received within 15s. Both the client and server protocol implementations
|
||||
will send an initial PING on connection and ensure at least one command every
|
||||
5s is sent (not necessarily ``PING``).
|
||||
* ``RDATA`` commands *usually* include a numeric token, however if the stream
|
||||
has multiple rows to replicate per token the server will send multiple
|
||||
``RDATA`` commands, with all but the last having a token of ``batch``. See
|
||||
the documentation on ``commands.RdataCommand`` for further details.
|
||||
|
||||
|
||||
Architecture
|
||||
------------
|
||||
|
||||
The basic structure of the protocol is line based, where the initial word of
|
||||
each line specifies the command. The rest of the line is parsed based on the
|
||||
command. For example, the `RDATA` command is defined as::
|
||||
|
||||
RDATA <stream_name> <token> <row_json>
|
||||
|
||||
(Note that `<row_json>` may contains spaces, but cannot contain newlines.)
|
||||
|
||||
Blank lines are ignored.
|
||||
|
||||
|
||||
Keep alives
|
||||
~~~~~~~~~~~
|
||||
|
||||
Both sides are expected to send at least one command every 5s or so, and
|
||||
should send a ``PING`` command if necessary. If either side does not receive a
|
||||
command within e.g. 15s then the connection should be closed.
|
||||
|
||||
Because the server may be connected to manually using e.g. netcat, the timeouts
|
||||
aren't enabled until an initial ``PING`` command is seen. Both the client and
|
||||
server implementations below send a ``PING`` command immediately on connection to
|
||||
ensure the timeouts are enabled.
|
||||
|
||||
This ensures that both sides can quickly realize if the tcp connection has gone
|
||||
and handle the situation appropriately.
|
||||
|
||||
|
||||
Start up
|
||||
~~~~~~~~
|
||||
|
||||
When a new connection is made, the server:
|
||||
|
||||
* Sends a ``SERVER`` command, which includes the identity of the server, allowing
|
||||
the client to detect if it's connected to the expected server
|
||||
* Sends a ``PING`` command as above, to enable the client to time out connections
|
||||
promptly.
|
||||
|
||||
The client:
|
||||
|
||||
* Sends a ``NAME`` command, allowing the server to associate a human friendly
|
||||
name with the connection. This is optional.
|
||||
* Sends a ``PING`` as above
|
||||
* For each stream the client wishes to subscribe to, it sends a ``REPLICATE``
|
||||
with the stream_name and token it wants to subscribe from.
|
||||
* On receipt of a ``SERVER`` command, checks that the server name matches the
|
||||
expected server name.
|
||||
|
||||
|
||||
Error handling
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
If either side detects an error it can send an ``ERROR`` command and close the
|
||||
connection.
|
||||
|
||||
If the client side loses the connection to the server it should reconnect,
|
||||
following the steps above.
|
||||
|
||||
|
||||
Congestion
|
||||
~~~~~~~~~~
|
||||
|
||||
If the server sends messages faster than the client can consume them the server
|
||||
will first buffer a (fairly large) number of commands and then disconnect the
|
||||
client. This ensures that we don't queue up an unbounded number of commands in
|
||||
memory and gives us a potential opportunity to squawk loudly. When/if the client
|
||||
recovers it can reconnect to the server and ask for missed messages.
|
||||
|
||||
|
||||
Reliability
|
||||
~~~~~~~~~~~
|
||||
|
||||
In general the replication stream should be considered an unreliable transport
|
||||
since e.g. commands are not resent if the connection disappears.
|
||||
|
||||
The exception to that are the replication streams, i.e. RDATA commands, since
|
||||
these include tokens which can be used to restart the stream on connection
|
||||
errors.
|
||||
|
||||
The client should keep track of the token in the last RDATA command received
|
||||
for each stream so that on reconnection it can start streaming from the correct
|
||||
place. Note: not all RDATA have valid tokens due to batching. See
|
||||
``RdataCommand`` for more details.
|
||||
|
||||
|
||||
Example
|
||||
~~~~~~~
|
||||
|
||||
An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending; these are *not* included on the wire::
|
||||
|
||||
* connection established *
|
||||
> SERVER localhost:8823
|
||||
> PING 1490197665618
|
||||
< NAME synapse.app.appservice
|
||||
< PING 1490197665618
|
||||
< REPLICATE events 1
|
||||
< REPLICATE backfill 1
|
||||
< REPLICATE caches 1
|
||||
> POSITION events 1
|
||||
> POSITION backfill 1
|
||||
> POSITION caches 1
|
||||
> RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
|
||||
> RDATA events 14 ["$149019767112vOHxz:localhost:8823",
|
||||
"!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
|
||||
< PING 1490197675618
|
||||
> ERROR server stopping
|
||||
* connection closed by server *
|
||||
|
||||
The ``POSITION`` command sent by the server is used to set the client's position
|
||||
without needing to send data with the ``RDATA`` command.
|
||||
|
||||
|
||||
An example of a batched set of ``RDATA`` is::
|
||||
|
||||
> RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
|
||||
> RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
|
||||
> RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
|
||||
> RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]
|
||||
|
||||
In this case the client shouldn't advance its caches token until it sees
the last ``RDATA``.
|
||||
@@ -9,31 +9,35 @@ the Home Server to generate credentials that are valid for use on the TURN
server through the use of a secret shared between the Home Server and the
TURN server.

This document described how to install coturn
(https://code.google.com/p/coturn/) which also supports the TURN REST API,
This document describes how to install coturn
(https://github.com/coturn/coturn) which also supports the TURN REST API,
and integrate it with synapse.

coturn Setup
============

You may be able to set up coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.

1. Check out coturn::

      svn checkout http://coturn.googlecode.com/svn/trunk/ coturn

      git clone https://github.com/coturn/coturn.git coturn
      cd coturn

2. Configure it::

      ./configure

   You may need to install libevent2: if so, you should do so
   You may need to install ``libevent2``: if so, you should do so
   in the way recommended by your operating system.
   You can ignore warnings about lack of database support: a
   database is unnecessary for this purpose.

3. Build and install it::

      make
      make install

4. Make a config file in /etc/turnserver.conf. You can customise
   a config file from turnserver.conf.default. The relevant
4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
   lines, with example values, are::

      lt-cred-mech
@@ -41,7 +45,7 @@ coturn Setup
      static-auth-secret=[your secret key here]
      realm=turn.myserver.org

   See turnserver.conf.default for explanations of the options.
   See turnserver.conf for explanations of the options.
   One way to generate the static-auth-secret is with pwgen::

      pwgen -s 64 1
@@ -54,6 +58,7 @@ coturn Setup
   import your private key and certificate.

7. Start the turn server::

      bin/turnserver -o

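For background, the TURN REST API (for which coturn and synapse share the
``static-auth-secret``) derives short-lived credentials from that secret
rather than storing per-user passwords. The sketch below illustrates the
general scheme only; it is not Synapse's implementation, and the helper name
is made up::

    import base64
    import hmac
    import time
    from hashlib import sha1

    def turn_rest_credentials(user_id, shared_secret, ttl=86400):
        # The username encodes an expiry time; the password is an HMAC-SHA1
        # of that username keyed with the shared static-auth-secret.
        expiry = int(time.time()) + ttl
        username = "%d:%s" % (expiry, user_id)
        mac = hmac.new(shared_secret.encode("ascii"),
                       username.encode("ascii"), sha1)
        password = base64.b64encode(mac.digest()).decode("ascii")
        return username, password
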
74 docs/url_previews.rst (new file)
@@ -0,0 +1,74 @@

URL Previews
============

Design notes on a URL previewing service for Matrix:

Options are:

1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.
   * Pros:
     * Decouples the implementation entirely from Synapse.
     * Uses existing Matrix events & content repo to store the metadata.
   * Cons:
     * Which AS should provide this service for a room, and why should you trust it?
     * Doesn't work well with E2E; you'd have to cut the AS into every room
       * the AS would end up subscribing to every room anyway.

2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:
   * Pros:
     * Simple and flexible; can be used by any clients at any point
   * Cons:
     * If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
     * We need somewhere to store the URL metadata rather than just using Matrix itself
     * We can't piggyback on matrix to distribute the metadata between HSes.

3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.
   * Pros:
     * Works transparently for all clients
     * Piggy-backs nicely on using Matrix for distributing the metadata.
     * No confusion as to which AS should provide the service.
   * Cons:
     * Doesn't work with E2E
     * We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.

4. Make the sending client use the preview API and insert the event itself when successful.
   * Pros:
     * Works well with E2E
     * No custom server functionality
     * Lets the client customise the preview that they send (like on FB)
   * Cons:
     * Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.

5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.

Best solution is probably a combination of both 2 and 4.

 * Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending)
 * Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one)

This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"

This is tantamount also to senders calculating their own thumbnails for sending in advance of the main content - we are trusting the sender not to lie about the content in the thumbnail, whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.

However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.

As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.

API
---

::

    GET /_matrix/media/r0/preview_url?url=http://wherever.com
    200 OK
    {
        "og:type" : "article",
        "og:url" : "https://twitter.com/matrixdotorg/status/684074366691356672",
        "og:title" : "Matrix on Twitter",
        "og:image" : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png",
        "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance & bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”",
        "og:site_name" : "Twitter"
    }

* Downloads the URL
* If HTML, just stores it in RAM and parses it for OG meta tags
  * Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
* If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.
* Otherwise, don't bother downloading further.
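A hedged example of how a receiving client (the "first cut" of option 2 above)
might call this endpoint; the lack of authentication shown here reflects the
design notes above, not a statement about the final API::

    # Fetch OpenGraph metadata for a URL via the homeserver's preview API.
    import requests

    def fetch_preview(hs_base_url, target_url):
        resp = requests.get(
            hs_base_url + "/_matrix/media/r0/preview_url",
            params={"url": target_url},
        )
        resp.raise_for_status()
        og = resp.json()
        # og is a dict of OpenGraph keys, e.g. og["og:title"]; any og:image is
        # expected to be an mxc:// URI pointing at the media repo (see above).
        return og
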
98 docs/workers.rst (new file)
@@ -0,0 +1,98 @@

Scaling synapse via workers
---------------------------

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

All processes continue to share the same database instance, and as such, workers
only work with postgres-based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using
postgres anyway if you care about scalability).

The workers communicate with the master synapse process via a synapse-specific
HTTP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

To enable workers, you need to add a replication listener to the master synapse, e.g.::

    listeners:
     - port: 9092
       bind_address: '127.0.0.1'
       type: http
       tls: false
       x_forwarded: false
       resources:
        - names: [replication]
          compress: false

Under **no circumstances** should this replication API listener be exposed to the
public internet; it currently implements no authentication whatsoever and is
unencrypted HTTP.

You then create a set of configs for the various worker processes. These
worker configuration files should be stored in a dedicated subdirectory, to allow
synctl to manipulate them.

The currently available worker applications are:

 * synapse.app.pusher - handles sending push notifications to sygnal and email
 * synapse.app.synchrotron - handles /sync endpoints. Can scale horizontally through multiple instances.
 * synapse.app.appservice - handles output traffic to Application Services
 * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
 * synapse.app.media_repository - handles the media repository.
 * synapse.app.client_reader - handles client API endpoints like /publicRooms

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.

You must specify the type of worker application (worker_app) and the replication
endpoint that it's talking to on the main synapse process (worker_replication_url).

For instance::

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
    worker_replication_url: http://127.0.0.1:9092/_synapse/replication

    worker_listeners:
     - type: http
       port: 8083
       resources:
         - names:
           - client

    worker_daemonize: True
    worker_pid_file: /home/matrix/synapse/synchrotron.pid
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
by the main synapse.

Obviously you should configure your loadbalancer to route the /sync endpoint to
the synchrotron instance(s) in this setup.

Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.::

    synctl -a $CONFIG/workers start

Currently you should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.

To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

All of the above is highly experimental and subject to change as Synapse evolves,
but is documented here to help folks who need highly scalable Synapses similar
to the one running matrix.org!

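As a convenience when wiring up the load balancer mentioned above, a small
script along these lines (a sketch only; not part of synctl) can summarise
which worker_app each config in the workers directory runs and on which
ports::

    import glob
    import os
    import sys

    import yaml

    # Print "<config>: <worker_app> on ports [...]" for each worker config.
    config_dir = sys.argv[1] if len(sys.argv) > 1 else "workers"
    for path in sorted(glob.glob(os.path.join(config_dir, "*.yaml"))):
        with open(path) as f:
            conf = yaml.safe_load(f) or {}
        ports = [l.get("port") for l in conf.get("worker_listeners", [])]
        print("%s: %s on ports %s" % (path, conf.get("worker_app"), ports))
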
22 jenkins-dendron-haproxy-postgres.sh (new executable file)
@@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export WORKSPACE
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy
|
||||
|
||||
./jenkins/prepare_synapse.sh
|
||||
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
|
||||
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
|
||||
./dendron/jenkins/build_dendron.sh
|
||||
./sytest/jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
./sytest/jenkins/install_and_run.sh \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--dendron $WORKSPACE/dendron/bin/dendron \
|
||||
--haproxy \
|
||||
19 jenkins-dendron-postgres.sh (new executable file)
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export WORKSPACE
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
./jenkins/prepare_synapse.sh
|
||||
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
|
||||
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
|
||||
./dendron/jenkins/build_dendron.sh
|
||||
./sytest/jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
./sytest/jenkins/install_and_run.sh \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--dendron $WORKSPACE/dendron/bin/dendron \
|
||||
@@ -4,58 +4,14 @@ set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export WORKSPACE
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
./jenkins/prepare_synapse.sh
|
||||
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decide whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
./sytest/jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
$TOX_BIN/pip install psycopg2
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_BASE:=8000}
|
||||
|
||||
./jenkins/prep_sytest_for_postgres.sh
|
||||
|
||||
echo >&2 "Running sytest with PostgreSQL";
|
||||
./jenkins/install_and_run.sh --coverage \
|
||||
--python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--port-base $PORT_BASE
|
||||
|
||||
cd ..
|
||||
cp sytest/.coverage.* .
|
||||
|
||||
# Combine the coverage reports
|
||||
echo "Combining:" .coverage.*
|
||||
$TOX_BIN/python -m coverage combine
|
||||
# Output coverage to coverage.xml
|
||||
$TOX_BIN/coverage xml -o coverage.xml
|
||||
./sytest/jenkins/install_and_run.sh \
|
||||
--synapse-directory $WORKSPACE \
|
||||
|
||||
@@ -4,52 +4,12 @@ set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export WORKSPACE
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
./jenkins/prepare_synapse.sh
|
||||
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decide whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PORT_BASE:=8500}
|
||||
./jenkins/install_and_run.sh --coverage \
|
||||
--python $TOX_BIN/python \
|
||||
--synapse-directory $WORKSPACE \
|
||||
--port-base $PORT_BASE
|
||||
|
||||
cd ..
|
||||
cp sytest/.coverage.* .
|
||||
|
||||
# Combine the coverage reports
|
||||
echo "Combining:" .coverage.*
|
||||
$TOX_BIN/python -m coverage combine
|
||||
# Output coverage to coverage.xml
|
||||
$TOX_BIN/coverage xml -o coverage.xml
|
||||
./sytest/jenkins/install_and_run.sh \
|
||||
--synapse-directory $WORKSPACE \
|
||||
|
||||
@@ -22,4 +22,9 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox --notest -e py27
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
|
||||
$TOX_BIN/pip install lxml
|
||||
|
||||
tox -e py27
|
||||
|
||||
86 jenkins.sh (deleted)
@@ -1,86 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
: ${WORKSPACE:="$(pwd)"}
|
||||
|
||||
export PYTHONDONTWRITEBYTECODE=yep
|
||||
export SYNAPSE_CACHE_FACTOR=1
|
||||
|
||||
# Output test results as junit xml
|
||||
export TRIAL_FLAGS="--reporter=subunit"
|
||||
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
|
||||
# Write coverage reports to a separate file for each process
|
||||
export COVERAGE_OPTS="-p"
|
||||
export DUMP_COVERAGE_COMMAND="coverage help"
|
||||
|
||||
# Output flake8 violations to violations.flake8.log
|
||||
# Don't exit with non-0 status code on Jenkins,
|
||||
# so that the build steps continue and a later step can decide whether to
|
||||
# UNSTABLE or FAILURE this build.
|
||||
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
|
||||
|
||||
rm .coverage* || echo "No coverage files to remove"
|
||||
|
||||
tox
|
||||
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
|
||||
TOX_BIN=$WORKSPACE/.tox/py27/bin
|
||||
|
||||
if [[ ! -e .sytest-base ]]; then
|
||||
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
|
||||
else
|
||||
(cd .sytest-base; git fetch -p)
|
||||
fi
|
||||
|
||||
rm -rf sytest
|
||||
git clone .sytest-base sytest --shared
|
||||
cd sytest
|
||||
|
||||
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
|
||||
|
||||
: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5}
|
||||
: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5}
|
||||
: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5}
|
||||
export PERL5LIB PERL_MB_OPT PERL_MM_OPT
|
||||
|
||||
./install-deps.pl
|
||||
|
||||
: ${PORT_BASE:=8000}
|
||||
|
||||
echo >&2 "Running sytest with SQLite3";
|
||||
./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
|
||||
--python $TOX_BIN/python --all --port-base $PORT_BASE > results-sqlite3.tap
|
||||
|
||||
RUN_POSTGRES=""
|
||||
|
||||
for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do
|
||||
if psql synapse_jenkins_$port <<< ""; then
|
||||
RUN_POSTGRES="$RUN_POSTGRES:$port"
|
||||
cat > localhost-$port/database.yaml << EOF
|
||||
name: psycopg2
|
||||
args:
|
||||
database: synapse_jenkins_$port
|
||||
EOF
|
||||
fi
|
||||
done
|
||||
|
||||
# Run if both postgresql databases exist
|
||||
if test "$RUN_POSTGRES" = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
|
||||
echo >&2 "Running sytest with PostgreSQL";
|
||||
$TOX_BIN/pip install psycopg2
|
||||
./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
|
||||
--python $TOX_BIN/python --all --port-base $PORT_BASE > results-postgresql.tap
|
||||
else
|
||||
echo >&2 "Skipping running sytest with PostgreSQL, $RUN_POSTGRES"
|
||||
fi
|
||||
|
||||
cd ..
|
||||
cp sytest/.coverage.* .
|
||||
|
||||
# Combine the coverage reports
|
||||
echo "Combining:" .coverage.*
|
||||
$TOX_BIN/python -m coverage combine
|
||||
# Output coverage to coverage.xml
|
||||
$TOX_BIN/coverage xml -o coverage.xml
|
||||
44 jenkins/clone.sh (new executable file)
@@ -0,0 +1,44 @@
|
||||
#! /bin/bash
|
||||
|
||||
# This clones a project from github into a named subdirectory
|
||||
# If the project has a branch with the same name as this branch
|
||||
# then it will check out that branch after cloning.
# Otherwise it will check out "origin/develop".
|
||||
# The first argument is the name of the directory to checkout
|
||||
# the branch into.
|
||||
# The second argument is the URL of the remote repository to checkout.
|
||||
# Usually something like https://github.com/matrix-org/sytest.git
|
||||
|
||||
set -eux
|
||||
|
||||
NAME=$1
|
||||
PROJECT=$2
|
||||
BASE=".$NAME-base"
|
||||
|
||||
# Update our mirror.
|
||||
if [ ! -d ".$NAME-base" ]; then
|
||||
# Create a local mirror of the source repository.
|
||||
# This saves us from having to download the entire repository
|
||||
# when this script is next run.
|
||||
git clone "$PROJECT" "$BASE" --mirror
|
||||
else
|
||||
# Fetch any updates from the source repository.
|
||||
(cd "$BASE"; git fetch -p)
|
||||
fi
|
||||
|
||||
# Remove the existing repository so that we have a clean copy
|
||||
rm -rf "$NAME"
|
||||
# Cloning with --shared means that we will share portions of the
|
||||
# .git directory with our local mirror.
|
||||
git clone "$BASE" "$NAME" --shared
|
||||
|
||||
# Jenkins may have supplied us with the name of the branch in the
|
||||
# environment. Otherwise we will have to guess based on the current
|
||||
# commit.
|
||||
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
|
||||
cd "$NAME"
|
||||
# check out the relevant branch
|
||||
git checkout "${GIT_BRANCH}" || (
|
||||
echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
|
||||
git checkout "origin/develop"
|
||||
)
|
||||
20 jenkins/prepare_synapse.sh (new executable file)
@@ -0,0 +1,20 @@
|
||||
#! /bin/bash
|
||||
|
||||
cd "`dirname $0`/.."
|
||||
|
||||
TOX_DIR=$WORKSPACE/.tox
|
||||
|
||||
mkdir -p $TOX_DIR
|
||||
|
||||
if ! [ $TOX_DIR -ef .tox ]; then
|
||||
ln -s "$TOX_DIR" .tox
|
||||
fi
|
||||
|
||||
# set up the virtualenv
|
||||
tox -e py27 --notest -v
|
||||
|
||||
TOX_BIN=$TOX_DIR/py27/bin
|
||||
$TOX_BIN/pip install setuptools
|
||||
{ python synapse/python_dependencies.py
|
||||
echo lxml psycopg2
|
||||
} | xargs $TOX_BIN/pip install
|
||||
7 res/templates/mail-Vector.css (new file)
@@ -0,0 +1,7 @@
|
||||
.header {
|
||||
border-bottom: 4px solid #e4f7ed ! important;
|
||||
}
|
||||
|
||||
.notif_link a, .footer a {
|
||||
color: #76CFA6 ! important;
|
||||
}
|
||||
156 res/templates/mail.css (new file)
@@ -0,0 +1,156 @@
|
||||
body {
|
||||
margin: 0px;
|
||||
}
|
||||
|
||||
pre, code {
|
||||
word-break: break-word;
|
||||
white-space: pre-wrap;
|
||||
}
|
||||
|
||||
#page {
|
||||
font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
|
||||
font-color: #454545;
|
||||
font-size: 12pt;
|
||||
width: 100%;
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
#inner {
|
||||
width: 640px;
|
||||
}
|
||||
|
||||
.header {
|
||||
width: 100%;
|
||||
height: 87px;
|
||||
color: #454545;
|
||||
border-bottom: 4px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.logo {
|
||||
text-align: right;
|
||||
margin-left: 20px;
|
||||
}
|
||||
|
||||
.salutation {
|
||||
padding-top: 10px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.summarytext {
|
||||
}
|
||||
|
||||
.room {
|
||||
width: 100%;
|
||||
color: #454545;
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.room_header td {
|
||||
padding-top: 38px;
|
||||
padding-bottom: 10px;
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
}
|
||||
|
||||
.room_name {
|
||||
vertical-align: middle;
|
||||
font-size: 18px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.room_header h2 {
|
||||
margin-top: 0px;
|
||||
margin-left: 75px;
|
||||
font-size: 20px;
|
||||
}
|
||||
|
||||
.room_avatar {
|
||||
width: 56px;
|
||||
line-height: 0px;
|
||||
text-align: center;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
.room_avatar img {
|
||||
width: 48px;
|
||||
height: 48px;
|
||||
object-fit: cover;
|
||||
border-radius: 24px;
|
||||
}
|
||||
|
||||
.notif {
|
||||
border-bottom: 1px solid #e5e5e5;
|
||||
margin-top: 16px;
|
||||
padding-bottom: 16px;
|
||||
}
|
||||
|
||||
.historical_message .sender_avatar {
|
||||
opacity: 0.3;
|
||||
}
|
||||
|
||||
/* spell out opacity and historical_message class names for Outlook aka Word */
|
||||
.historical_message .sender_name {
|
||||
color: #e3e3e3;
|
||||
}
|
||||
|
||||
.historical_message .message_time {
|
||||
color: #e3e3e3;
|
||||
}
|
||||
|
||||
.historical_message .message_body {
|
||||
color: #c7c7c7;
|
||||
}
|
||||
|
||||
.historical_message td,
|
||||
.message td {
|
||||
padding-top: 10px;
|
||||
}
|
||||
|
||||
.sender_avatar {
|
||||
width: 56px;
|
||||
text-align: center;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
.sender_avatar img {
|
||||
margin-top: -2px;
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
border-radius: 16px;
|
||||
}
|
||||
|
||||
.sender_name {
|
||||
display: inline;
|
||||
font-size: 13px;
|
||||
color: #a2a2a2;
|
||||
}
|
||||
|
||||
.message_time {
|
||||
text-align: right;
|
||||
width: 100px;
|
||||
font-size: 11px;
|
||||
color: #a2a2a2;
|
||||
}
|
||||
|
||||
.message_body {
|
||||
}
|
||||
|
||||
.notif_link td {
|
||||
padding-top: 10px;
|
||||
padding-bottom: 10px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.notif_link a, .footer a {
|
||||
color: #454545;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.debug {
|
||||
font-size: 10px;
|
||||
color: #888;
|
||||
}
|
||||
|
||||
.footer {
|
||||
margin-top: 20px;
|
||||
text-align: center;
|
||||
}
|
||||
45 res/templates/notif.html (new file)
@@ -0,0 +1,45 @@
|
||||
{% for message in notif.messages %}
|
||||
<tr class="{{ "historical_message" if message.is_historical else "message" }}">
|
||||
<td class="sender_avatar">
|
||||
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
|
||||
{% if message.sender_avatar_url %}
|
||||
<img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
|
||||
{% else %}
|
||||
{% if message.sender_hash % 3 == 0 %}
|
||||
<img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
|
||||
{% elif message.sender_hash % 3 == 1 %}
|
||||
<img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
|
||||
{% else %}
|
||||
<img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="message_contents">
|
||||
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
|
||||
<div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
|
||||
{% endif %}
|
||||
<div class="message_body">
|
||||
{% if message.msgtype == "m.text" %}
|
||||
{{ message.body_text_html }}
|
||||
{% elif message.msgtype == "m.emote" %}
|
||||
{{ message.body_text_html }}
|
||||
{% elif message.msgtype == "m.notice" %}
|
||||
{{ message.body_text_html }}
|
||||
{% elif message.msgtype == "m.image" %}
|
||||
<img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
|
||||
{% elif message.msgtype == "m.file" %}
|
||||
<span class="filename">{{ message.body_text_plain }}</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
</td>
|
||||
<td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
<tr class="notif_link">
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="{{ notif.link }}">View {{ room.title }}</a>
|
||||
</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
16 res/templates/notif.txt (new file)
@@ -0,0 +1,16 @@
|
||||
{% for message in notif.messages %}
|
||||
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
|
||||
{% if message.msgtype == "m.text" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.emote" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.notice" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.image" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% elif message.msgtype == "m.file" %}
|
||||
{{ message.body_text_plain }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
View {{ room.title }} at {{ notif.link }}
|
||||
55 res/templates/notif_mail.html (new file)
@@ -0,0 +1,55 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<style type="text/css">
|
||||
{% include 'mail.css' without context %}
|
||||
{% include "mail-%s.css" % app_name ignore missing without context %}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<table id="page">
|
||||
<tr>
|
||||
<td> </td>
|
||||
<td id="inner">
|
||||
<table class="header">
|
||||
<tr>
|
||||
<td>
|
||||
<div class="salutation">Hi {{ user_display_name }},</div>
|
||||
<div class="summarytext">{{ summary_text }}</div>
|
||||
</td>
|
||||
<td class="logo">
|
||||
{% if app_name == "Riot" %}
|
||||
<img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
|
||||
{% elif app_name == "Vector" %}
|
||||
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
|
||||
{% else %}
|
||||
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
|
||||
{% endif %}
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
{% for room in rooms %}
|
||||
{% include 'room.html' with context %}
|
||||
{% endfor %}
|
||||
<div class="footer">
|
||||
<a href="{{ unsubscribe_link }}">Unsubscribe</a>
|
||||
<br/>
|
||||
<br/>
|
||||
<div class="debug">
|
||||
Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
|
||||
an event was received at {{ reason.received_at|format_ts("%c") }}
|
||||
which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
|
||||
{% if reason.last_sent_ts %}
|
||||
and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
|
||||
which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
|
||||
{% else %}
|
||||
and we don't have a last time we sent a mail for this room.
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
<td> </td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
10 res/templates/notif_mail.txt (new file)
@@ -0,0 +1,10 @@
|
||||
Hi {{ user_display_name }},
|
||||
|
||||
{{ summary_text }}
|
||||
|
||||
{% for room in rooms %}
|
||||
{% include 'room.txt' with context %}
|
||||
{% endfor %}
|
||||
|
||||
You can disable these notifications at {{ unsubscribe_link }}
|
||||
|
||||
33 res/templates/room.html (new file)
@@ -0,0 +1,33 @@
|
||||
<table class="room">
|
||||
<tr class="room_header">
|
||||
<td class="room_avatar">
|
||||
{% if room.avatar_url %}
|
||||
<img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
|
||||
{% else %}
|
||||
{% if room.hash % 3 == 0 %}
|
||||
<img alt="" src="https://vector.im/beta/img/76cfa6.png" />
|
||||
{% elif room.hash % 3 == 1 %}
|
||||
<img alt="" src="https://vector.im/beta/img/50e2c2.png" />
|
||||
{% else %}
|
||||
<img alt="" src="https://vector.im/beta/img/f4c371.png" />
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
</td>
|
||||
<td class="room_name" colspan="2">
|
||||
{{ room.title }}
|
||||
</td>
|
||||
</tr>
|
||||
{% if room.invite %}
|
||||
<tr>
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="{{ room.link }}">Join the conversation.</a>
|
||||
</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
{% else %}
|
||||
{% for notif in room.notifs %}
|
||||
{% include 'notif.html' with context %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
</table>
|
||||
9 res/templates/room.txt (new file)
@@ -0,0 +1,9 @@
|
||||
{{ room.title }}
|
||||
|
||||
{% if room.invite %}
|
||||
You've been invited, join at {{ room.link }}
|
||||
{% else %}
|
||||
{% for notif in room.notifs %}
|
||||
{% include 'notif.txt' with context %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
@@ -116,17 +116,19 @@ def get_json(origin_name, origin_key, destination, path):
|
||||
authorization_headers = []
|
||||
|
||||
for key, sig in signed_json["signatures"][origin_name].items():
|
||||
authorization_headers.append(bytes(
|
||||
"X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
|
||||
origin_name, key, sig,
|
||||
)
|
||||
))
|
||||
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
|
||||
origin_name, key, sig,
|
||||
)
|
||||
authorization_headers.append(bytes(header))
|
||||
sys.stderr.write(header)
|
||||
sys.stderr.write("\n")
|
||||
|
||||
result = requests.get(
|
||||
lookup(destination, path),
|
||||
headers={"Authorization": authorization_headers[0]},
|
||||
verify=False,
|
||||
)
|
||||
sys.stderr.write("Status Code: %d\n" % (result.status_code,))
|
||||
return result.json()
|
||||
|
||||
|
||||
@@ -141,6 +143,7 @@ def main():
|
||||
)
|
||||
|
||||
json.dump(result, sys.stdout)
|
||||
print ""
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import argparse
|
||||
|
||||
import sys
|
||||
|
||||
import bcrypt
|
||||
import getpass
|
||||
|
||||
import yaml
|
||||
|
||||
bcrypt_rounds=12
|
||||
password_pepper = ""
|
||||
|
||||
def prompt_for_pass():
|
||||
password = getpass.getpass("Password: ")
|
||||
@@ -28,12 +34,22 @@ if __name__ == "__main__":
|
||||
default=None,
|
||||
help="New password for user. Will prompt if omitted.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-c", "--config",
|
||||
type=argparse.FileType('r'),
|
||||
help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
if "config" in args and args.config:
|
||||
config = yaml.safe_load(args.config)
|
||||
bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
|
||||
password_config = config.get("password_config", {})
|
||||
password_pepper = password_config.get("pepper", password_pepper)
|
||||
password = args.password
|
||||
|
||||
if not password:
|
||||
password = prompt_for_pass()
|
||||
|
||||
print bcrypt.hashpw(password, bcrypt.gensalt(bcrypt_rounds))
|
||||
print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
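
The hunk above appends the configured pepper to the password before bcrypt
hashing it. A standalone Python 3 sketch of the same idea (function names are
illustrative, not from the repository; verification shown for completeness)::

    import bcrypt

    def hash_password(password, pepper, rounds=12):
        # Append the pepper before hashing, mirroring the script above.
        return bcrypt.hashpw((password + pepper).encode("utf-8"),
                             bcrypt.gensalt(rounds))

    def check_password(password, pepper, stored_hash):
        return bcrypt.checkpw((password + pepper).encode("utf-8"), stored_hash)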
|
||||
|
||||
|
||||
@@ -25,18 +25,26 @@ import urllib2
|
||||
import yaml
|
||||
|
||||
|
||||
def request_registration(user, password, server_location, shared_secret):
|
||||
def request_registration(user, password, server_location, shared_secret, admin=False):
|
||||
mac = hmac.new(
|
||||
key=shared_secret,
|
||||
msg=user,
|
||||
digestmod=hashlib.sha1,
|
||||
).hexdigest()
|
||||
)
|
||||
|
||||
mac.update(user)
|
||||
mac.update("\x00")
|
||||
mac.update(password)
|
||||
mac.update("\x00")
|
||||
mac.update("admin" if admin else "notadmin")
|
||||
|
||||
mac = mac.hexdigest()
|
||||
|
||||
data = {
|
||||
"user": user,
|
||||
"password": password,
|
||||
"mac": mac,
|
||||
"type": "org.matrix.login.shared_secret",
|
||||
"admin": admin,
|
||||
}
|
||||
|
||||
server_location = server_location.rstrip("/")
|
||||
@@ -68,7 +76,7 @@ def request_registration(user, password, server_location, shared_secret):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def register_new_user(user, password, server_location, shared_secret):
|
||||
def register_new_user(user, password, server_location, shared_secret, admin):
|
||||
if not user:
|
||||
try:
|
||||
default_user = getpass.getuser()
|
||||
@@ -99,7 +107,14 @@ def register_new_user(user, password, server_location, shared_secret):
|
||||
print "Passwords do not match"
|
||||
sys.exit(1)
|
||||
|
||||
request_registration(user, password, server_location, shared_secret)
|
||||
if not admin:
|
||||
admin = raw_input("Make admin [no]: ")
|
||||
if admin in ("y", "yes", "true"):
|
||||
admin = True
|
||||
else:
|
||||
admin = False
|
||||
|
||||
request_registration(user, password, server_location, shared_secret, bool(admin))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
@@ -119,6 +134,11 @@ if __name__ == "__main__":
|
||||
default=None,
|
||||
help="New password for user. Will prompt if omitted.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-a", "--admin",
|
||||
action="store_true",
|
||||
help="Register new user as an admin. Will prompt if omitted.",
|
||||
)
|
||||
|
||||
group = parser.add_mutually_exclusive_group(required=True)
|
||||
group.add_argument(
|
||||
@@ -151,4 +171,4 @@ if __name__ == "__main__":
|
||||
else:
|
||||
secret = args.shared_secret
|
||||
|
||||
register_new_user(args.user, args.password, args.server_url, secret)
|
||||
register_new_user(args.user, args.password, args.server_url, secret, args.admin)
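
For reference, the shared-secret registration MAC that the updated script
builds in the hunks above (user, password and the admin flag separated by NUL
bytes, HMAC-SHA1 keyed with the shared secret) can be sketched on its own as
follows, in the same Python 2 style as the script::

    import hashlib
    import hmac

    def registration_mac(shared_secret, user, password, admin=False):
        mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
        mac.update(user)
        mac.update("\x00")
        mac.update(password)
        mac.update("\x00")
        mac.update("admin" if admin else "notadmin")
        return mac.hexdigest()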
|
||||
|
||||
@@ -34,11 +34,13 @@ logger = logging.getLogger("synapse_port_db")
|
||||
|
||||
|
||||
BOOLEAN_COLUMNS = {
|
||||
"events": ["processed", "outlier"],
|
||||
"events": ["processed", "outlier", "contains_url"],
|
||||
"rooms": ["is_public"],
|
||||
"event_edges": ["is_state"],
|
||||
"presence_list": ["accepted"],
|
||||
"presence_stream": ["currently_active"],
|
||||
"public_room_list_stream": ["visibility"],
|
||||
"device_lists_outbound_pokes": ["sent"],
|
||||
}
|
||||
|
||||
|
||||
@@ -71,6 +73,14 @@ APPEND_ONLY_TABLES = [
|
||||
"event_to_state_groups",
|
||||
"rejections",
|
||||
"event_search",
|
||||
"presence_stream",
|
||||
"push_rules_stream",
|
||||
"current_state_resets",
|
||||
"ex_outlier_stream",
|
||||
"cache_invalidation_stream",
|
||||
"public_room_list_stream",
|
||||
"state_group_edges",
|
||||
"stream_ordering_to_exterm",
|
||||
]
|
||||
|
||||
|
||||
@@ -92,8 +102,12 @@ class Store(object):
|
||||
|
||||
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
|
||||
_simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
|
||||
_simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
|
||||
_simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
|
||||
_simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
|
||||
_simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
|
||||
_simple_select_one_onecol_txn = SQLBaseStore.__dict__[
|
||||
"_simple_select_one_onecol_txn"
|
||||
]
|
||||
|
||||
_simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
|
||||
_simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
|
||||
@@ -158,31 +172,40 @@ class Porter(object):
|
||||
def setup_table(self, table):
|
||||
if table in APPEND_ONLY_TABLES:
|
||||
# It's safe to just carry on inserting.
|
||||
next_chunk = yield self.postgres_store._simple_select_one_onecol(
|
||||
row = yield self.postgres_store._simple_select_one(
|
||||
table="port_from_sqlite3",
|
||||
keyvalues={"table_name": table},
|
||||
retcol="rowid",
|
||||
retcols=("forward_rowid", "backward_rowid"),
|
||||
allow_none=True,
|
||||
)
|
||||
|
||||
total_to_port = None
|
||||
if next_chunk is None:
|
||||
if row is None:
|
||||
if table == "sent_transactions":
|
||||
next_chunk, already_ported, total_to_port = (
|
||||
forward_chunk, already_ported, total_to_port = (
|
||||
yield self._setup_sent_transactions()
|
||||
)
|
||||
backward_chunk = 0
|
||||
else:
|
||||
yield self.postgres_store._simple_insert(
|
||||
table="port_from_sqlite3",
|
||||
values={"table_name": table, "rowid": 1}
|
||||
values={
|
||||
"table_name": table,
|
||||
"forward_rowid": 1,
|
||||
"backward_rowid": 0,
|
||||
}
|
||||
)
|
||||
|
||||
next_chunk = 1
|
||||
forward_chunk = 1
|
||||
backward_chunk = 0
|
||||
already_ported = 0
|
||||
else:
|
||||
forward_chunk = row["forward_rowid"]
|
||||
backward_chunk = row["backward_rowid"]
|
||||
|
||||
if total_to_port is None:
|
||||
already_ported, total_to_port = yield self._get_total_count_to_port(
|
||||
table, next_chunk
|
||||
table, forward_chunk, backward_chunk
|
||||
)
|
||||
else:
|
||||
def delete_all(txn):
|
||||
@@ -196,32 +219,124 @@ class Porter(object):
|
||||
|
||||
yield self.postgres_store._simple_insert(
|
||||
table="port_from_sqlite3",
|
||||
values={"table_name": table, "rowid": 0}
|
||||
values={
|
||||
"table_name": table,
|
||||
"forward_rowid": 1,
|
||||
"backward_rowid": 0,
|
||||
}
|
||||
)
|
||||
|
||||
next_chunk = 1
|
||||
forward_chunk = 1
|
||||
backward_chunk = 0
|
||||
|
||||
already_ported, total_to_port = yield self._get_total_count_to_port(
|
||||
table, next_chunk
|
||||
table, forward_chunk, backward_chunk
|
||||
)
|
||||
|
||||
defer.returnValue((table, already_ported, total_to_port, next_chunk))
|
||||
defer.returnValue(
|
||||
(table, already_ported, total_to_port, forward_chunk, backward_chunk)
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_table(self, table, postgres_size, table_size, next_chunk):
|
||||
def handle_table(self, table, postgres_size, table_size, forward_chunk,
|
||||
backward_chunk):
|
||||
if not table_size:
|
||||
return
|
||||
|
||||
self.progress.add_table(table, postgres_size, table_size)
|
||||
|
||||
select = (
|
||||
if table == "event_search":
|
||||
yield self.handle_search_table(
|
||||
postgres_size, table_size, forward_chunk, backward_chunk
|
||||
)
|
||||
return
|
||||
|
||||
forward_select = (
|
||||
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
|
||||
% (table,)
|
||||
)
|
||||
|
||||
backward_select = (
|
||||
"SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
|
||||
% (table,)
|
||||
)
|
||||
|
||||
do_forward = [True]
|
||||
do_backward = [True]
|
||||
|
||||
while True:
|
||||
def r(txn):
|
||||
txn.execute(select, (next_chunk, self.batch_size,))
|
||||
forward_rows = []
|
||||
backward_rows = []
|
||||
if do_forward[0]:
|
||||
txn.execute(forward_select, (forward_chunk, self.batch_size,))
|
||||
forward_rows = txn.fetchall()
|
||||
if not forward_rows:
|
||||
do_forward[0] = False
|
||||
|
||||
if do_backward[0]:
|
||||
txn.execute(backward_select, (backward_chunk, self.batch_size,))
|
||||
backward_rows = txn.fetchall()
|
||||
if not backward_rows:
|
||||
do_backward[0] = False
|
||||
|
||||
if forward_rows or backward_rows:
|
||||
headers = [column[0] for column in txn.description]
|
||||
else:
|
||||
headers = None
|
||||
|
||||
return headers, forward_rows, backward_rows
|
||||
|
||||
headers, frows, brows = yield self.sqlite_store.runInteraction(
|
||||
"select", r
|
||||
)
|
||||
|
||||
if frows or brows:
|
||||
if frows:
|
||||
forward_chunk = max(row[0] for row in frows) + 1
|
||||
if brows:
|
||||
backward_chunk = min(row[0] for row in brows) - 1
|
||||
|
||||
rows = frows + brows
|
||||
self._convert_rows(table, headers, rows)
|
||||
|
||||
def insert(txn):
|
||||
self.postgres_store.insert_many_txn(
|
||||
txn, table, headers[1:], rows
|
||||
)
|
||||
|
||||
self.postgres_store._simple_update_one_txn(
|
||||
txn,
|
||||
table="port_from_sqlite3",
|
||||
keyvalues={"table_name": table},
|
||||
updatevalues={
|
||||
"forward_rowid": forward_chunk,
|
||||
"backward_rowid": backward_chunk,
|
||||
},
|
||||
)
|
||||
|
||||
yield self.postgres_store.execute(insert)
|
||||
|
||||
postgres_size += len(rows)
|
||||
|
||||
self.progress.update(table, postgres_size)
|
||||
else:
|
||||
return
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_search_table(self, postgres_size, table_size, forward_chunk,
|
||||
backward_chunk):
|
||||
select = (
|
||||
"SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
|
||||
" FROM event_search as es"
|
||||
" INNER JOIN events AS e USING (event_id, room_id)"
|
||||
" WHERE es.rowid >= ?"
|
||||
" ORDER BY es.rowid LIMIT ?"
|
||||
)
|
||||
|
||||
while True:
|
||||
def r(txn):
|
||||
txn.execute(select, (forward_chunk, self.batch_size,))
|
||||
rows = txn.fetchall()
|
||||
headers = [column[0] for column in txn.description]
|
||||
|
||||
@@ -230,59 +345,51 @@ class Porter(object):
|
||||
headers, rows = yield self.sqlite_store.runInteraction("select", r)
|
||||
|
||||
if rows:
|
||||
next_chunk = rows[-1][0] + 1
|
||||
forward_chunk = rows[-1][0] + 1
|
||||
|
||||
if table == "event_search":
|
||||
# We have to treat event_search differently since it has a
|
||||
# different structure in the two different databases.
|
||||
def insert(txn):
|
||||
sql = (
|
||||
"INSERT INTO event_search (event_id, room_id, key, sender, vector)"
|
||||
" VALUES (?,?,?,?,to_tsvector('english', ?))"
|
||||
# We have to treat event_search differently since it has a
|
||||
# different structure in the two different databases.
|
||||
def insert(txn):
|
||||
sql = (
|
||||
"INSERT INTO event_search (event_id, room_id, key,"
|
||||
" sender, vector, origin_server_ts, stream_ordering)"
|
||||
" VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
|
||||
)
|
||||
|
||||
rows_dict = [
|
||||
dict(zip(headers, row))
|
||||
for row in rows
|
||||
]
|
||||
|
||||
txn.executemany(sql, [
|
||||
(
|
||||
row["event_id"],
|
||||
row["room_id"],
|
||||
row["key"],
|
||||
row["sender"],
|
||||
row["value"],
|
||||
row["origin_server_ts"],
|
||||
row["stream_ordering"],
|
||||
)
|
||||
for row in rows_dict
|
||||
])
|
||||
|
||||
rows_dict = [
|
||||
dict(zip(headers, row))
|
||||
for row in rows
|
||||
]
|
||||
|
||||
txn.executemany(sql, [
|
||||
(
|
||||
row["event_id"],
|
||||
row["room_id"],
|
||||
row["key"],
|
||||
row["sender"],
|
||||
row["value"],
|
||||
)
|
||||
for row in rows_dict
|
||||
])
|
||||
|
||||
self.postgres_store._simple_update_one_txn(
|
||||
txn,
|
||||
table="port_from_sqlite3",
|
||||
keyvalues={"table_name": table},
|
||||
updatevalues={"rowid": next_chunk},
|
||||
)
|
||||
else:
|
||||
self._convert_rows(table, headers, rows)
|
||||
|
||||
def insert(txn):
|
||||
self.postgres_store.insert_many_txn(
|
||||
txn, table, headers[1:], rows
|
||||
)
|
||||
|
||||
self.postgres_store._simple_update_one_txn(
|
||||
txn,
|
||||
table="port_from_sqlite3",
|
||||
keyvalues={"table_name": table},
|
||||
updatevalues={"rowid": next_chunk},
|
||||
)
|
||||
self.postgres_store._simple_update_one_txn(
|
||||
txn,
|
||||
table="port_from_sqlite3",
|
||||
keyvalues={"table_name": "event_search"},
|
||||
updatevalues={
|
||||
"forward_rowid": forward_chunk,
|
||||
"backward_rowid": backward_chunk,
|
||||
},
|
||||
)
|
||||
|
||||
yield self.postgres_store.execute(insert)
|
||||
|
||||
postgres_size += len(rows)
|
||||
|
||||
self.progress.update(table, postgres_size)
|
||||
self.progress.update("event_search", postgres_size)
|
||||
|
||||
else:
|
||||
return
|
||||
|
||||
@@ -356,10 +463,32 @@ class Porter(object):
|
||||
txn.execute(
|
||||
"CREATE TABLE port_from_sqlite3 ("
|
||||
" table_name varchar(100) NOT NULL UNIQUE,"
|
||||
" rowid bigint NOT NULL"
|
||||
" forward_rowid bigint NOT NULL,"
|
||||
" backward_rowid bigint NOT NULL"
|
||||
")"
|
||||
)
|
||||
|
||||
# The old port script created a table with just a "rowid" column.
|
||||
# We want people to be able to rerun this script from an old port
|
||||
# so that they can pick up any missing events that were not
|
||||
# ported across.
|
||||
def alter_table(txn):
|
||||
txn.execute(
|
||||
"ALTER TABLE IF EXISTS port_from_sqlite3"
|
||||
" RENAME rowid TO forward_rowid"
|
||||
)
|
||||
txn.execute(
|
||||
"ALTER TABLE IF EXISTS port_from_sqlite3"
|
||||
" ADD backward_rowid bigint NOT NULL DEFAULT 0"
|
||||
)
|
||||
|
||||
try:
|
||||
yield self.postgres_store.runInteraction(
|
||||
"alter_table", alter_table
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info("Failed to create port table: %s", e)
|
||||
|
||||
try:
|
||||
yield self.postgres_store.runInteraction(
|
||||
"create_port_table", create_port_table
|
||||
@@ -419,7 +548,7 @@ class Porter(object):
|
||||
@defer.inlineCallbacks
|
||||
def _setup_sent_transactions(self):
|
||||
# Only save things from the last day
|
||||
yesterday = int(time.time()*1000) - 86400000
|
||||
yesterday = int(time.time() * 1000) - 86400000
|
||||
|
||||
# And save the max transaction id from each destination
|
||||
select = (
|
||||
@@ -475,7 +604,11 @@ class Porter(object):
|
||||
|
||||
yield self.postgres_store._simple_insert(
|
||||
table="port_from_sqlite3",
|
||||
values={"table_name": "sent_transactions", "rowid": next_chunk}
|
||||
values={
|
||||
"table_name": "sent_transactions",
|
||||
"forward_rowid": next_chunk,
|
||||
"backward_rowid": 0,
|
||||
}
|
||||
)
|
||||
|
||||
def get_sent_table_size(txn):
|
||||
@@ -496,13 +629,18 @@ class Porter(object):
|
||||
defer.returnValue((next_chunk, inserted_rows, total_count))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get_remaining_count_to_port(self, table, next_chunk):
|
||||
rows = yield self.sqlite_store.execute_sql(
|
||||
def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
|
||||
frows = yield self.sqlite_store.execute_sql(
|
||||
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
|
||||
next_chunk,
|
||||
forward_chunk,
|
||||
)
|
||||
|
||||
defer.returnValue(rows[0][0])
|
||||
brows = yield self.sqlite_store.execute_sql(
|
||||
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
|
||||
backward_chunk,
|
||||
)
|
||||
|
||||
defer.returnValue(frows[0][0] + brows[0][0])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get_already_ported_count(self, table):
|
||||
@@ -513,10 +651,10 @@ class Porter(object):
|
||||
defer.returnValue(rows[0][0])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get_total_count_to_port(self, table, next_chunk):
|
||||
def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
|
||||
remaining, done = yield defer.gatherResults(
|
||||
[
|
||||
self._get_remaining_count_to_port(table, next_chunk),
|
||||
self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
|
||||
self._get_already_ported_count(table),
|
||||
],
|
||||
consumeErrors=True,
|
||||
@@ -647,7 +785,7 @@ class CursesProgress(Progress):
|
||||
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
|
||||
|
||||
self.stdscr.addstr(
|
||||
i+2, left_margin + max_len - len(table),
|
||||
i + 2, left_margin + max_len - len(table),
|
||||
table,
|
||||
curses.A_BOLD | color,
|
||||
)
|
||||
@@ -655,18 +793,18 @@ class CursesProgress(Progress):
|
||||
size = 20
|
||||
|
||||
progress = "[%s%s]" % (
|
||||
"#" * int(perc*size/100),
|
||||
" " * (size - int(perc*size/100)),
|
||||
"#" * int(perc * size / 100),
|
||||
" " * (size - int(perc * size / 100)),
|
||||
)
|
||||
|
||||
self.stdscr.addstr(
|
||||
i+2, left_margin + max_len + middle_space,
|
||||
i + 2, left_margin + max_len + middle_space,
|
||||
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
|
||||
)
|
||||
|
||||
if self.finished:
|
||||
self.stdscr.addstr(
|
||||
rows-1, 0,
|
||||
rows - 1, 0,
|
||||
"Press any key to exit...",
|
||||
)
|
||||
|
||||
|
||||
@@ -16,7 +16,5 @@ ignore =
|
||||
|
||||
[flake8]
|
||||
max-line-length = 90
|
||||
ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
|
||||
|
||||
[pep8]
|
||||
max-line-length = 90
|
||||
# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
|
||||
ignore = W503
|
||||
|
||||
73 setup.py
@@ -23,6 +23,45 @@ import sys
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
|
||||
# Some notes on `setup.py test`:
|
||||
#
|
||||
# Once upon a time we used to try to make `setup.py test` run `tox` to run the
|
||||
# tests. That's a bad idea for three reasons:
|
||||
#
|
||||
# 1: `setup.py test` is supposed to find out whether the tests work in the
|
||||
# *current* environment, not whatever tox sets up.
|
||||
# 2: Empirically, trying to install tox during the test run wasn't working ("No
|
||||
# module named virtualenv").
|
||||
# 3: The tox documentation advises against it[1].
|
||||
#
|
||||
# Even further back in time, we used to use setuptools_trial [2]. That has its
|
||||
# own set of issues: for instance, it requires installation of Twisted to build
|
||||
# an sdist (because the recommended mode of usage is to add it to
|
||||
# `setup_requires`). That in turn means that in order to successfully run tox
|
||||
# you have to have the python header files installed for whichever version of
|
||||
# python tox uses (which is python3 on recent ubuntus, for example).
|
||||
#
|
||||
# So, for now at least, we stick with what appears to be the convention among
|
||||
# Twisted projects, and don't attempt to do anything when someone runs
|
||||
# `setup.py test`; instead we direct people to run `trial` directly if they
|
||||
# care.
|
||||
#
|
||||
# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
|
||||
# [2]: https://pypi.python.org/pypi/setuptools_trial
|
||||
class TestCommand(Command):
|
||||
user_options = []
|
||||
|
||||
def initialize_options(self):
|
||||
pass
|
||||
|
||||
def finalize_options(self):
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
print ("""Synapse's tests cannot be run via setup.py. To run them, try:
|
||||
PYTHONPATH="." trial tests
|
||||
""")
|
||||
|
||||
def read_file(path_segments):
|
||||
"""Read a file from the package. Takes a list of strings to join to
|
||||
make the path"""
|
||||
@@ -39,38 +78,6 @@ def exec_file(path_segments):
|
||||
return result
|
||||
|
||||
|
||||
class Tox(Command):
|
||||
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
|
||||
|
||||
def initialize_options(self):
|
||||
self.tox_args = None
|
||||
|
||||
def finalize_options(self):
|
||||
self.test_args = []
|
||||
self.test_suite = True
|
||||
|
||||
def run(self):
|
||||
#import here, cause outside the eggs aren't loaded
|
||||
try:
|
||||
import tox
|
||||
except ImportError:
|
||||
try:
|
||||
self.distribution.fetch_build_eggs("tox")
|
||||
import tox
|
||||
except:
|
||||
raise RuntimeError(
|
||||
"The tests need 'tox' to run. Please install 'tox'."
|
||||
)
|
||||
import shlex
|
||||
args = self.tox_args
|
||||
if args:
|
||||
args = shlex.split(self.tox_args)
|
||||
else:
|
||||
args = []
|
||||
errno = tox.cmdline(args=args)
|
||||
sys.exit(errno)
|
||||
|
||||
|
||||
version = exec_file(("synapse", "__init__.py"))["__version__"]
|
||||
dependencies = exec_file(("synapse", "python_dependencies.py"))
|
||||
long_description = read_file(("README.rst",))
|
||||
@@ -86,5 +93,5 @@ setup(
|
||||
zip_safe=False,
|
||||
long_description=long_description,
|
||||
scripts=["synctl"] + glob.glob("scripts/*"),
|
||||
cmdclass={'test': Tox},
|
||||
cmdclass={'test': TestCommand},
|
||||
)
|
||||
|
||||
@@ -16,4 +16,4 @@
|
||||
""" This is a reference implementation of a Matrix home server.
|
||||
"""
|
||||
|
||||
__version__ = "0.14.0"
|
||||
__version__ = "0.19.3"
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2017 Vector Creations Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -43,10 +44,8 @@ class JoinRules(object):
|
||||
|
||||
class LoginType(object):
|
||||
PASSWORD = u"m.login.password"
|
||||
OAUTH = u"m.login.oauth2"
|
||||
EMAIL_CODE = u"m.login.email.code"
|
||||
EMAIL_URL = u"m.login.email.url"
|
||||
EMAIL_IDENTITY = u"m.login.email.identity"
|
||||
MSISDN = u"m.login.msisdn"
|
||||
RECAPTCHA = u"m.login.recaptcha"
|
||||
DUMMY = u"m.login.dummy"
|
||||
|
||||
@@ -85,3 +84,8 @@ class RoomCreationPreset(object):
|
||||
PRIVATE_CHAT = "private_chat"
|
||||
PUBLIC_CHAT = "public_chat"
|
||||
TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
|
||||
|
||||
|
||||
class ThirdPartyEntityKind(object):
|
||||
USER = "user"
|
||||
LOCATION = "location"
|
||||
|
||||
@@ -15,6 +15,7 @@

"""Contains exceptions and error codes."""

import json
import logging

logger = logging.getLogger(__name__)
@@ -39,35 +40,46 @@ class Codes(object):
    CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
    CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
    MISSING_PARAM = "M_MISSING_PARAM"
    INVALID_PARAM = "M_INVALID_PARAM"
    TOO_LARGE = "M_TOO_LARGE"
    EXCLUSIVE = "M_EXCLUSIVE"
    THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
    THREEPID_IN_USE = "THREEPID_IN_USE"
    THREEPID_IN_USE = "M_THREEPID_IN_USE"
    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
    INVALID_USERNAME = "M_INVALID_USERNAME"
    SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


class CodeMessageException(RuntimeError):
    """An exception with integer code and message string attributes."""
    """An exception with integer code and message string attributes.

    Attributes:
        code (int): HTTP error code
        msg (str): string describing the error
    """
    def __init__(self, code, msg):
        super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
        self.code = code
        self.msg = msg
        self.response_code_message = None

    def error_dict(self):
        return cs_error(self.msg)


class SynapseError(CodeMessageException):
    """A base error which can be caught for all synapse events."""
    """A base exception type for matrix errors which have an errcode and error
    message (as well as an HTTP status code).

    Attributes:
        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
    """
    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
        """Constructs a synapse error.

        Args:
            code (int): The integer error code (an HTTP response code)
            msg (str): The human-readable error message.
            err (str): The error code e.g 'M_FORBIDDEN'
            errcode (str): The matrix error code e.g 'M_FORBIDDEN'
        """
        super(SynapseError, self).__init__(code, msg)
        self.errcode = errcode
@@ -78,6 +90,39 @@ class SynapseError(CodeMessageException):
            self.errcode,
        )

    @classmethod
    def from_http_response_exception(cls, err):
        """Make a SynapseError based on an HTTPResponseException

        This is useful when a proxied request has failed, and we need to
        decide how to map the failure onto a matrix error to send back to the
        client.

        An attempt is made to parse the body of the http response as a matrix
        error. If that succeeds, the errcode and error message from the body
        are used as the errcode and error message in the new synapse error.

        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
        set to the reason code from the HTTP response.

        Args:
            err (HttpResponseException):

        Returns:
            SynapseError:
        """
        # try to parse the body as json, to get better errcode/msg, but
        # default to M_UNKNOWN with the HTTP status as the error text
        try:
            j = json.loads(err.response)
        except ValueError:
            j = {}
        errcode = j.get('errcode', Codes.UNKNOWN)
        errmsg = j.get('error', err.msg)

        res = SynapseError(err.code, errmsg, errcode)
        return res


class RegistrationError(SynapseError):
    """An error raised when a registration event fails."""
@@ -103,13 +148,11 @@ class UnrecognizedRequestError(SynapseError):

class NotFoundError(SynapseError):
    """An error indicating we can't find the thing you asked for"""
    def __init__(self, *args, **kwargs):
        if "errcode" not in kwargs:
            kwargs["errcode"] = Codes.NOT_FOUND
    def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND):
        super(NotFoundError, self).__init__(
            404,
            "Not found",
            **kwargs
            msg,
            errcode=errcode
        )


@@ -170,7 +213,6 @@ class LimitExceededError(SynapseError):
                 errcode=Codes.LIMIT_EXCEEDED):
        super(LimitExceededError, self).__init__(code, msg, errcode)
        self.retry_after_ms = retry_after_ms
        self.response_code_message = "Too Many Requests"

    def error_dict(self):
        return cs_error(
@@ -240,6 +282,19 @@ class FederationError(RuntimeError):


class HttpResponseException(CodeMessageException):
    """
    Represents an HTTP-level failure of an outbound request

    Attributes:
        response (str): body of response
    """
    def __init__(self, code, msg, response):
        self.response = response
        """

        Args:
            code (int): HTTP status code
            msg (str): reason phrase from HTTP response status line
            response (str): body of response
        """
        super(HttpResponseException, self).__init__(code, msg)
        self.response = response

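As an aside, the new SynapseError.from_http_response_exception classmethod is aimed at the proxied-request case described in its docstring; a minimal sketch of that pattern follows (illustrative only, not part of the diff, and the helper name is made up).

# --- illustrative example (not part of the diff) ---
from synapse.api.errors import HttpResponseException, SynapseError

def proxy_and_translate(make_remote_request):
    # `make_remote_request` is a hypothetical callable that performs an
    # outbound HTTP request and raises HttpResponseException on failure.
    try:
        return make_remote_request()
    except HttpResponseException as e:
        # Re-raise as a matrix error so the client gets a sensible errcode:
        # the remote errcode/message are reused when the body parses as JSON,
        # otherwise M_UNKNOWN with the HTTP reason phrase.
        raise SynapseError.from_http_response_exception(e)
# --- end example ---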
@@ -13,9 +13,174 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import SynapseError
from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, RoomID
from twisted.internet import defer

import ujson as json
import jsonschema
from jsonschema import FormatChecker

FILTER_SCHEMA = {
    "additionalProperties": False,
    "type": "object",
    "properties": {
        "limit": {
            "type": "number"
        },
        "senders": {
            "$ref": "#/definitions/user_id_array"
        },
        "not_senders": {
            "$ref": "#/definitions/user_id_array"
        },
        # TODO: We don't limit event type values but we probably should...
        # check types are valid event types
        "types": {
            "type": "array",
            "items": {
                "type": "string"
            }
        },
        "not_types": {
            "type": "array",
            "items": {
                "type": "string"
            }
        }
    }
}

ROOM_FILTER_SCHEMA = {
    "additionalProperties": False,
    "type": "object",
    "properties": {
        "not_rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "ephemeral": {
            "$ref": "#/definitions/room_event_filter"
        },
        "include_leave": {
            "type": "boolean"
        },
        "state": {
            "$ref": "#/definitions/room_event_filter"
        },
        "timeline": {
            "$ref": "#/definitions/room_event_filter"
        },
        "account_data": {
            "$ref": "#/definitions/room_event_filter"
        },
    }
}

ROOM_EVENT_FILTER_SCHEMA = {
    "additionalProperties": False,
    "type": "object",
    "properties": {
        "limit": {
            "type": "number"
        },
        "senders": {
            "$ref": "#/definitions/user_id_array"
        },
        "not_senders": {
            "$ref": "#/definitions/user_id_array"
        },
        "types": {
            "type": "array",
            "items": {
                "type": "string"
            }
        },
        "not_types": {
            "type": "array",
            "items": {
                "type": "string"
            }
        },
        "rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "not_rooms": {
            "$ref": "#/definitions/room_id_array"
        },
        "contains_url": {
            "type": "boolean"
        }
    }
}

USER_ID_ARRAY_SCHEMA = {
    "type": "array",
    "items": {
        "type": "string",
        "format": "matrix_user_id"
    }
}

ROOM_ID_ARRAY_SCHEMA = {
    "type": "array",
    "items": {
        "type": "string",
        "format": "matrix_room_id"
    }
}

USER_FILTER_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "schema for a Sync filter",
    "type": "object",
    "definitions": {
        "room_id_array": ROOM_ID_ARRAY_SCHEMA,
        "user_id_array": USER_ID_ARRAY_SCHEMA,
        "filter": FILTER_SCHEMA,
        "room_filter": ROOM_FILTER_SCHEMA,
        "room_event_filter": ROOM_EVENT_FILTER_SCHEMA
    },
    "properties": {
        "presence": {
            "$ref": "#/definitions/filter"
        },
        "account_data": {
            "$ref": "#/definitions/filter"
        },
        "room": {
            "$ref": "#/definitions/room_filter"
        },
        "event_format": {
            "type": "string",
            "enum": ["client", "federation"]
        },
        "event_fields": {
            "type": "array",
            "items": {
                "type": "string",
                # Don't allow '\\' in event field filters. This makes matching
                # events a lot easier as we can then use a negative lookbehind
                # assertion to split '\.' If we allowed \\ then it would
                # incorrectly split '\\.' See synapse.events.utils.serialize_event
                "pattern": "^((?!\\\).)*$"
            }
        }
    },
    "additionalProperties": False
}


@FormatChecker.cls_checks('matrix_room_id')
def matrix_room_id_validator(room_id_str):
    return RoomID.from_string(room_id_str)


@FormatChecker.cls_checks('matrix_user_id')
def matrix_user_id_validator(user_id_str):
    return UserID.from_string(user_id_str)


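To make the schemas above concrete, a filter body is validated against USER_FILTER_SCHEMA with a FormatChecker so the matrix_user_id / matrix_room_id formats are enforced. A small sketch follows (the example filters are invented, and the import assumes this file is synapse/api/filtering.py):

# --- illustrative example (not part of the diff) ---
import jsonschema
from jsonschema import FormatChecker

from synapse.api.filtering import USER_FILTER_SCHEMA

good = {"room": {"timeline": {"limit": 10, "types": ["m.room.message"]}}}
bad = {"room": {"timeline": {"limit": "ten"}}}  # "limit" must be a number

# Passes: matches the room_filter / room_event_filter definitions above.
jsonschema.validate(good, USER_FILTER_SCHEMA, format_checker=FormatChecker())

try:
    jsonschema.validate(bad, USER_FILTER_SCHEMA, format_checker=FormatChecker())
except jsonschema.ValidationError as e:
    print("rejected:", e.message)  # Filtering wraps this in a 400 SynapseError
# --- end example ---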
class Filtering(object):
@@ -24,10 +189,10 @@ class Filtering(object):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

    @defer.inlineCallbacks
    def get_user_filter(self, user_localpart, filter_id):
        result = self.store.get_user_filter(user_localpart, filter_id)
        result.addCallback(FilterCollection)
        return result
        result = yield self.store.get_user_filter(user_localpart, filter_id)
        defer.returnValue(FilterCollection(result))

    def add_user_filter(self, user_localpart, user_filter):
        self.check_valid_filter(user_filter)
@@ -50,83 +215,11 @@ class Filtering(object):
        # NB: Filters are the complete json blobs. "Definitions" are an
        # individual top-level key e.g. public_user_data. Filters are made of
        # many definitions.

        top_level_definitions = [
            "presence", "account_data"
        ]

        room_level_definitions = [
            "state", "timeline", "ephemeral", "account_data"
        ]

        for key in top_level_definitions:
            if key in user_filter_json:
                self._check_definition(user_filter_json[key])

        if "room" in user_filter_json:
            self._check_definition_room_lists(user_filter_json["room"])
            for key in room_level_definitions:
                if key in user_filter_json["room"]:
                    self._check_definition(user_filter_json["room"][key])

    def _check_definition_room_lists(self, definition):
        """Check that "rooms" and "not_rooms" are lists of room ids if they
        are present

        Args:
            definition(dict): The filter definition
        Raises:
            SynapseError: If there was a problem with this definition.
        """
        # check rooms are valid room IDs
        room_id_keys = ["rooms", "not_rooms"]
        for key in room_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for room_id in definition[key]:
                    RoomID.from_string(room_id)

    def _check_definition(self, definition):
        """Check if the provided definition is valid.

        This inspects not only the types but also the values to make sure they
        make sense.

        Args:
            definition(dict): The filter definition
        Raises:
            SynapseError: If there was a problem with this definition.
        """
        # NB: Filters are the complete json blobs. "Definitions" are an
        # individual top-level key e.g. public_user_data. Filters are made of
        # many definitions.
        if type(definition) != dict:
            raise SynapseError(
                400, "Expected JSON object, not %s" % (definition,)
            )

        self._check_definition_room_lists(definition)

        # check senders are valid user IDs
        user_id_keys = ["senders", "not_senders"]
        for key in user_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for user_id in definition[key]:
                    UserID.from_string(user_id)

        # TODO: We don't limit event type values but we probably should...
        # check types are valid event types
        event_keys = ["types", "not_types"]
        for key in event_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for event_type in definition[key]:
                    if not isinstance(event_type, basestring):
                        raise SynapseError(400, "Event type should be a string")
        try:
            jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
                                format_checker=FormatChecker())
        except jsonschema.ValidationError as e:
            raise SynapseError(400, e.message)


class FilterCollection(object):
@@ -150,6 +243,7 @@ class FilterCollection(object):
        self.include_leave = filter_json.get("room", {}).get(
            "include_leave", False
        )
        self.event_fields = filter_json.get("event_fields", [])

    def __repr__(self):
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
@@ -184,32 +278,89 @@ class FilterCollection(object):
    def filter_room_account_data(self, events):
        return self._room_account_data.filter(self._room_filter.filter(events))

    def blocks_all_presence(self):
        return (
            self._presence_filter.filters_all_types() or
            self._presence_filter.filters_all_senders()
        )

    def blocks_all_room_ephemeral(self):
        return (
            self._room_ephemeral_filter.filters_all_types() or
            self._room_ephemeral_filter.filters_all_senders() or
            self._room_ephemeral_filter.filters_all_rooms()
        )

    def blocks_all_room_timeline(self):
        return (
            self._room_timeline_filter.filters_all_types() or
            self._room_timeline_filter.filters_all_senders() or
            self._room_timeline_filter.filters_all_rooms()
        )


class Filter(object):
    def __init__(self, filter_json):
        self.filter_json = filter_json

        self.types = self.filter_json.get("types", None)
        self.not_types = self.filter_json.get("not_types", [])

        self.rooms = self.filter_json.get("rooms", None)
        self.not_rooms = self.filter_json.get("not_rooms", [])

        self.senders = self.filter_json.get("senders", None)
        self.not_senders = self.filter_json.get("not_senders", [])

        self.contains_url = self.filter_json.get("contains_url", None)

    def filters_all_types(self):
        return "*" in self.not_types

    def filters_all_senders(self):
        return "*" in self.not_senders

    def filters_all_rooms(self):
        return "*" in self.not_rooms

    def check(self, event):
        """Checks whether the filter matches the given event.

        Returns:
            bool: True if the event matches
        """
        sender = event.get("sender", None)
        if not sender:
            # Presence events have their 'sender' in content.user_id
            content = event.get("content")
            # account_data has been allowed to have non-dict content, so check type first
            if isinstance(content, dict):
                sender = content.get("user_id")
        # We usually get the full "events" as dictionaries coming through,
        # except for presence which actually gets passed around as its own
        # namedtuple type.
        if isinstance(event, UserPresenceState):
            sender = event.user_id
            room_id = None
            ev_type = "m.presence"
            is_url = False
        else:
            sender = event.get("sender", None)
            if not sender:
                # Presence events had their 'sender' in content.user_id, but are
                # now handled above. We don't know if anything else uses this
                # form. TODO: Check this and probably remove it.
                content = event.get("content")
                # account_data has been allowed to have non-dict content, so
                # check type first
                if isinstance(content, dict):
                    sender = content.get("user_id")

            room_id = event.get("room_id", None)
            ev_type = event.get("type", None)
            is_url = "url" in event.get("content", {})

        return self.check_fields(
            event.get("room_id", None),
            room_id,
            sender,
            event.get("type", None),
            ev_type,
            is_url,
        )

    def check_fields(self, room_id, sender, event_type):
    def check_fields(self, room_id, sender, event_type, contains_url):
        """Checks whether the filter matches the given event fields.

        Returns:
@@ -223,15 +374,20 @@ class Filter(object):

        for name, match_func in literal_keys.items():
            not_name = "not_%s" % (name,)
            disallowed_values = self.filter_json.get(not_name, [])
            disallowed_values = getattr(self, not_name)
            if any(map(match_func, disallowed_values)):
                return False

            allowed_values = self.filter_json.get(name, None)
            allowed_values = getattr(self, name)
            if allowed_values is not None:
                if not any(map(match_func, allowed_values)):
                    return False

        contains_url_filter = self.filter_json.get("contains_url")
        if contains_url_filter is not None:
            if contains_url_filter != contains_url:
                return False

        return True

    def filter_rooms(self, room_ids):

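For orientation, the wildcard helpers added to Filter above let whole sync sections be short-circuited. A quick sketch of the behaviour (illustrative only; the event dict is invented and the import assumes synapse/api/filtering.py):

# --- illustrative example (not part of the diff) ---
from synapse.api.filtering import Filter

# "*" in not_types means every event type is filtered out, which is what
# FilterCollection.blocks_all_room_timeline() and friends key off.
f = Filter({"not_types": ["*"]})

print(f.filters_all_types())  # True

event = {
    "type": "m.room.message",
    "sender": "@alice:example.com",
    "room_id": "!room:example.com",
    "content": {},
}
print(f.check(event))  # False: the wildcard excludes every type
# --- end example ---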
@@ -23,7 +23,7 @@ class Ratelimiter(object):
    def __init__(self):
        self.message_counts = collections.OrderedDict()

    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
        """Can the user send a message?
        Args:
            user_id: The user sending a message.
@@ -32,12 +32,15 @@ class Ratelimiter(object):
                second.
            burst_count: How many messages the user can send before being
                limited.
            update (bool): Whether to update the message rates or not. This is
                useful to check if a message would be allowed to be sent before
                its ready to be actually sent.
        Returns:
            A pair of a bool indicating if they can send a message now and a
            time in seconds of when they can next send a message.
        """
        self.prune_message_counts(time_now_s)
        message_count, time_start, _ignored = self.message_counts.pop(
        message_count, time_start, _ignored = self.message_counts.get(
            user_id, (0., time_now_s, None),
        )
        time_delta = time_now_s - time_start
@@ -52,9 +55,10 @@ class Ratelimiter(object):
            allowed = True
            message_count += 1

        self.message_counts[user_id] = (
            message_count, time_start, msg_rate_hz
        )
        if update:
            self.message_counts[user_id] = (
                message_count, time_start, msg_rate_hz
            )

        if msg_rate_hz > 0:
            time_allowed = (
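The new update flag lets callers probe the limiter without consuming quota, which is what the "check before it is ready to be sent" note in the docstring is about. Roughly (an illustrative sketch, assuming the module path synapse.api.ratelimiting; the rate values are arbitrary):

# --- illustrative example (not part of the diff) ---
import time

from synapse.api.ratelimiting import Ratelimiter

limiter = Ratelimiter()

# Dry run: ask whether a message would currently be allowed, without
# recording it against the user's budget.
allowed, next_allowed_at = limiter.send_message(
    "@alice:example.com", time.time(),
    msg_rate_hz=0.2, burst_count=10,
    update=False,
)

if allowed:
    # ... build and persist the event, then record it for real.
    limiter.send_message(
        "@alice:example.com", time.time(),
        msg_rate_hz=0.2, burst_count=10,
    )
# --- end example ---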
@@ -25,4 +25,3 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"

@@ -16,13 +16,11 @@
import sys
sys.dont_write_bytecode = True

from synapse.python_dependencies import (
    check_requirements, MissingRequirementError
)  # NOQA
from synapse import python_dependencies  # noqa: E402

try:
    check_requirements()
except MissingRequirementError as e:
    python_dependencies.check_requirements()
except python_dependencies.MissingRequirementError as e:
    message = "\n".join([
        "Missing Requirement: %s" % (e.message,),
        "To install run:",

synapse/app/appservice.py (new file, 224 lines)
@@ -0,0 +1,224 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.server import HomeServer
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
from synapse import events
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.appservice")
|
||||
|
||||
|
||||
class AppserviceSlaveStore(
|
||||
DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class AppserviceServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse appservice now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
appservice_handler = self.get_application_service_handler()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(results):
|
||||
stream = results.get("events")
|
||||
if stream:
|
||||
max_stream_id = stream["position"]
|
||||
yield appservice_handler.notify_interested_services(max_stream_id)
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
replicate(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(30)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse appservice", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.appservice"
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
if config.notify_appservices:
|
||||
sys.stderr.write(
|
||||
"\nThe appservices must be disabled in the main synapse process"
|
||||
"\nbefore they can be run in a separate worker."
|
||||
"\nPlease add ``notify_appservices: false`` to the main config"
|
||||
"\n"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Force the pushers to start since they will be disabled in the main config
|
||||
config.notify_appservices = True
|
||||
|
||||
ps = AppserviceServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ps.setup()
|
||||
ps.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ps.replicate()
|
||||
ps.get_datastore().start_profiling()
|
||||
ps.get_state_handler().start_caching()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-appservice",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
synapse/app/client_reader.py (new file, 232 lines)
@@ -0,0 +1,232 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||
from synapse.rest.client.v1.room import PublicRoomListRestServlet
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.client_ips import ClientIpStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.crypto import context_factory
|
||||
|
||||
from synapse import events
|
||||
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.client_reader")
|
||||
|
||||
|
||||
class ClientReaderSlavedStore(
|
||||
SlavedEventStore,
|
||||
SlavedKeyStore,
|
||||
RoomStore,
|
||||
DirectoryStore,
|
||||
SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
TransactionStore,
|
||||
BaseSlavedStore,
|
||||
ClientIpStore, # After BaseSlavedStore because the constructor is different
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class ClientReaderServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
elif name == "client":
|
||||
resource = JsonResource(self, canonical_json=False)
|
||||
PublicRoomListRestServlet(self).register(resource)
|
||||
resources.update({
|
||||
"/_matrix/client/r0": resource,
|
||||
"/_matrix/client/unstable": resource,
|
||||
"/_matrix/client/v2_alpha": resource,
|
||||
"/_matrix/client/api/v1": resource,
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse client reader now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(5)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse client reader", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.client_reader"
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
||||
|
||||
ss = ClientReaderServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
ss.get_handlers()
|
||||
ss.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ss.get_state_handler().start_caching()
|
||||
ss.get_datastore().start_profiling()
|
||||
ss.replicate()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-client-reader",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
synapse/app/federation_reader.py (new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.api.urls import FEDERATION_PREFIX
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
from synapse.crypto import context_factory
|
||||
|
||||
from synapse import events
|
||||
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.federation_reader")
|
||||
|
||||
|
||||
class FederationReaderSlavedStore(
|
||||
SlavedEventStore,
|
||||
SlavedKeyStore,
|
||||
RoomStore,
|
||||
DirectoryStore,
|
||||
TransactionStore,
|
||||
BaseSlavedStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class FederationReaderServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
elif name == "federation":
|
||||
resources.update({
|
||||
FEDERATION_PREFIX: TransportLayerServer(self),
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse federation reader now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(5)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse federation reader", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.federation_reader"
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
||||
|
||||
ss = FederationReaderServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
ss.get_handlers()
|
||||
ss.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ss.get_state_handler().start_caching()
|
||||
ss.get_datastore().start_profiling()
|
||||
ss.replicate()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-federation-reader",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
synapse/app/federation_sender.py (new file, 342 lines)
@@ -0,0 +1,342 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.server import HomeServer
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.federation import send_queue
|
||||
from synapse.federation.units import Edu
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.presence import UserPresenceState
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
from synapse import events
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
import ujson as json
|
||||
|
||||
logger = logging.getLogger("synapse.app.appservice")
|
||||
|
||||
|
||||
class FederationSenderSlaveStore(
|
||||
SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
|
||||
SlavedRegistrationStore, SlavedDeviceStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class FederationSenderServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse federation_sender now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
send_handler = FederationSenderHandler(self)
|
||||
|
||||
send_handler.on_start()
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args.update((yield send_handler.stream_positions()))
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
yield send_handler.process_replication(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(30)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse federation sender", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.federation_sender"
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
if config.send_federation:
|
||||
sys.stderr.write(
|
||||
"\nThe send_federation must be disabled in the main synapse process"
|
||||
"\nbefore they can be run in a separate worker."
|
||||
"\nPlease add ``send_federation: false`` to the main config"
|
||||
"\n"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Force the pushers to start since they will be disabled in the main config
|
||||
config.send_federation = True
|
||||
|
||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
||||
|
||||
ps = FederationSenderServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ps.setup()
|
||||
ps.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ps.replicate()
|
||||
ps.get_datastore().start_profiling()
|
||||
ps.get_state_handler().start_caching()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-federation-sender",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
class FederationSenderHandler(object):
|
||||
"""Processes the replication stream and forwards the appropriate entries
|
||||
to the federation sender.
|
||||
"""
|
||||
def __init__(self, hs):
|
||||
self.store = hs.get_datastore()
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
|
||||
self._room_serials = {}
|
||||
self._room_typing = {}
|
||||
|
||||
def on_start(self):
|
||||
# There may be some events that are persisted but haven't been sent,
|
||||
# so send them now.
|
||||
self.federation_sender.notify_new_events(
|
||||
self.store.get_room_max_stream_ordering()
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def stream_positions(self):
|
||||
stream_id = yield self.store.get_federation_out_pos("federation")
|
||||
defer.returnValue({
|
||||
"federation": stream_id,
|
||||
|
||||
# Ack stuff we've "processed", this should only be called from
|
||||
# one process.
|
||||
"federation_ack": stream_id,
|
||||
})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_replication(self, result):
|
||||
# The federation stream contains things that we want to send out, e.g.
|
||||
# presence, typing, etc.
|
||||
fed_stream = result.get("federation")
|
||||
if fed_stream:
|
||||
latest_id = int(fed_stream["position"])
|
||||
|
||||
# The federation stream contains a bunch of different types of
|
||||
# rows that need to be handled differently. We parse the rows, put
|
||||
# them into the appropriate collection and then send them off.
|
||||
presence_to_send = {}
|
||||
keyed_edus = {}
|
||||
edus = {}
|
||||
failures = {}
|
||||
device_destinations = set()
|
||||
|
||||
# Parse the rows in the stream
|
||||
for row in fed_stream["rows"]:
|
||||
position, typ, content_js = row
|
||||
content = json.loads(content_js)
|
||||
|
||||
if typ == send_queue.PRESENCE_TYPE:
|
||||
destination = content["destination"]
|
||||
state = UserPresenceState.from_dict(content["state"])
|
||||
|
||||
presence_to_send.setdefault(destination, []).append(state)
|
||||
elif typ == send_queue.KEYED_EDU_TYPE:
|
||||
key = content["key"]
|
||||
edu = Edu(**content["edu"])
|
||||
|
||||
keyed_edus.setdefault(
|
||||
edu.destination, {}
|
||||
)[(edu.destination, tuple(key))] = edu
|
||||
elif typ == send_queue.EDU_TYPE:
|
||||
edu = Edu(**content)
|
||||
|
||||
edus.setdefault(edu.destination, []).append(edu)
|
||||
elif typ == send_queue.FAILURE_TYPE:
|
||||
destination = content["destination"]
|
||||
failure = content["failure"]
|
||||
|
||||
failures.setdefault(destination, []).append(failure)
|
||||
elif typ == send_queue.DEVICE_MESSAGE_TYPE:
|
||||
device_destinations.add(content["destination"])
|
||||
else:
|
||||
raise Exception("Unrecognised federation type: %r", typ)
|
||||
|
||||
# We've finished collecting, send everything off
|
||||
for destination, states in presence_to_send.items():
|
||||
self.federation_sender.send_presence(destination, states)
|
||||
|
||||
for destination, edu_map in keyed_edus.items():
|
||||
for key, edu in edu_map.items():
|
||||
self.federation_sender.send_edu(
|
||||
edu.destination, edu.edu_type, edu.content, key=key,
|
||||
)
|
||||
|
||||
for destination, edu_list in edus.items():
|
||||
for edu in edu_list:
|
||||
self.federation_sender.send_edu(
|
||||
edu.destination, edu.edu_type, edu.content, key=None,
|
||||
)
|
||||
|
||||
for destination, failure_list in failures.items():
|
||||
for failure in failure_list:
|
||||
self.federation_sender.send_failure(destination, failure)
|
||||
|
||||
for destination in device_destinations:
|
||||
self.federation_sender.send_device_messages(destination)
|
||||
|
||||
# Record where we are in the stream.
|
||||
yield self.store.update_federation_out_pos(
|
||||
"federation", latest_id
|
||||
)
|
||||
|
||||
# We also need to poke the federation sender when new events happen
|
||||
event_stream = result.get("events")
|
||||
if event_stream:
|
||||
latest_pos = event_stream["position"]
|
||||
self.federation_sender.notify_new_events(latest_pos)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
@@ -16,14 +16,12 @@
|
||||
|
||||
import synapse
|
||||
|
||||
import contextlib
|
||||
import gc
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import resource
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
import synapse.config.logger
|
||||
from synapse.config._base import ConfigError
|
||||
|
||||
from synapse.python_dependencies import (
|
||||
@@ -37,18 +35,11 @@ from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_d
|
||||
|
||||
from synapse.server import HomeServer
|
||||
|
||||
|
||||
from twisted.conch.manhole import ColoredManhole
|
||||
from twisted.conch.insults import insults
|
||||
from twisted.conch import manhole_ssh
|
||||
from twisted.cred import checkers, portal
|
||||
|
||||
|
||||
from twisted.internet import reactor, task, defer
|
||||
from twisted.application import service
|
||||
from twisted.web.resource import Resource, EncodingResourceWrapper
|
||||
from twisted.web.static import File
|
||||
from twisted.web.server import Site, GzipEncoderFactory, Request
|
||||
from twisted.web.server import GzipEncoderFactory
|
||||
from synapse.http.server import RootRedirect
|
||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
||||
@@ -61,11 +52,20 @@ from synapse.api.urls import (
|
||||
)
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||
from synapse.metrics import register_memory_metrics, get_metrics_for
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
|
||||
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.manhole import manhole
|
||||
|
||||
from synapse.http.site import SynapseSite
|
||||
|
||||
from synapse import events
|
||||
|
||||
from daemonize import Daemonize
|
||||
@@ -73,9 +73,6 @@ from daemonize import Daemonize
|
||||
logger = logging.getLogger("synapse.app.homeserver")
|
||||
|
||||
|
||||
ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
|
||||
|
||||
|
||||
def gz_wrap(r):
|
||||
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
|
||||
|
||||
@@ -113,7 +110,7 @@ def build_resource_for_web_client(hs):
|
||||
class SynapseHomeServer(HomeServer):
|
||||
def _listener_http(self, config, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_address = listener_config.get("bind_address", "")
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
tls = listener_config.get("tls", False)
|
||||
site_tag = listener_config.get("tag", port)
|
||||
|
||||
@@ -154,7 +151,7 @@ class SynapseHomeServer(HomeServer):
|
||||
MEDIA_PREFIX: media_repo,
|
||||
LEGACY_MEDIA_PREFIX: media_repo,
|
||||
CONTENT_REPO_PREFIX: ContentRepoResource(
|
||||
self, self.config.uploads_path, self.auth, self.content_addr
|
||||
self, self.config.uploads_path
|
||||
),
|
||||
})
|
||||
|
||||
@@ -173,30 +170,38 @@ class SynapseHomeServer(HomeServer):
|
||||
if name == "replication":
|
||||
resources[REPLICATION_PREFIX] = ReplicationResource(self)
|
||||
|
||||
root_resource = create_resource_tree(resources)
|
||||
if tls:
|
||||
reactor.listenSSL(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.https.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
self.tls_server_context_factory,
|
||||
interface=bind_address
|
||||
)
|
||||
if WEB_CLIENT_PREFIX in resources:
|
||||
root_resource = RootRedirect(WEB_CLIENT_PREFIX)
|
||||
else:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=bind_address
|
||||
)
|
||||
root_resource = Resource()
|
||||
|
||||
root_resource = create_resource_tree(resources, root_resource)
|
||||
|
||||
if tls:
|
||||
for address in bind_addresses:
|
||||
reactor.listenSSL(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.https.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
self.tls_server_context_factory,
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
logger.info("Synapse now listening on port %d", port)
|
||||
|
||||
def start_listening(self):
|
||||
@@ -206,26 +211,28 @@ class SynapseHomeServer(HomeServer):
|
||||
if listener["type"] == "http":
|
||||
self._listener_http(config, listener)
|
||||
elif listener["type"] == "manhole":
|
||||
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
|
||||
matrix="rabbithole"
|
||||
)
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
rlm = manhole_ssh.TerminalRealm()
|
||||
rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
|
||||
ColoredManhole,
|
||||
{
|
||||
"__name__": "__console__",
|
||||
"hs": self,
|
||||
}
|
||||
)
|
||||
|
||||
f = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))
|
||||
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
f,
|
||||
interface=listener.get("bind_address", '127.0.0.1')
|
||||
)
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
elif listener["type"] == "replication":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
for address in bind_addresses:
|
||||
factory = ReplicationStreamProtocolFactory(self)
|
||||
server_listener = reactor.listenTCP(
|
||||
listener["port"], factory, interface=address
|
||||
)
|
||||
reactor.addSystemEventTrigger(
|
||||
"before", "shutdown", server_listener.stopListening,
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@@ -269,86 +276,6 @@ def quit_with_error(error_string):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_version_string():
|
||||
try:
|
||||
null = open(os.devnull, 'w')
|
||||
cwd = os.path.dirname(os.path.abspath(__file__))
|
||||
try:
|
||||
git_branch = subprocess.check_output(
|
||||
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
|
||||
stderr=null,
|
||||
cwd=cwd,
|
||||
).strip()
|
||||
git_branch = "b=" + git_branch
|
||||
except subprocess.CalledProcessError:
|
||||
git_branch = ""
|
||||
|
||||
try:
|
||||
git_tag = subprocess.check_output(
|
||||
['git', 'describe', '--exact-match'],
|
||||
stderr=null,
|
||||
cwd=cwd,
|
||||
).strip()
|
||||
git_tag = "t=" + git_tag
|
||||
except subprocess.CalledProcessError:
|
||||
git_tag = ""
|
||||
|
||||
try:
|
||||
git_commit = subprocess.check_output(
|
||||
['git', 'rev-parse', '--short', 'HEAD'],
|
||||
stderr=null,
|
||||
cwd=cwd,
|
||||
).strip()
|
||||
except subprocess.CalledProcessError:
|
||||
git_commit = ""
|
||||
|
||||
try:
|
||||
dirty_string = "-this_is_a_dirty_checkout"
|
||||
is_dirty = subprocess.check_output(
|
||||
['git', 'describe', '--dirty=' + dirty_string],
|
||||
stderr=null,
|
||||
cwd=cwd,
|
||||
).strip().endswith(dirty_string)
|
||||
|
||||
git_dirty = "dirty" if is_dirty else ""
|
||||
except subprocess.CalledProcessError:
|
||||
git_dirty = ""
|
||||
|
||||
if git_branch or git_tag or git_commit or git_dirty:
|
||||
git_version = ",".join(
|
||||
s for s in
|
||||
(git_branch, git_tag, git_commit, git_dirty,)
|
||||
if s
|
||||
)
|
||||
|
||||
return (
|
||||
"Synapse/%s (%s)" % (
|
||||
synapse.__version__, git_version,
|
||||
)
|
||||
).encode("ascii")
|
||||
except Exception as e:
|
||||
logger.info("Failed to check for git repository: %s", e)
|
||||
|
||||
return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")
|
||||
|
||||
|
||||
def change_resource_limit(soft_file_no):
|
||||
try:
|
||||
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||
|
||||
if not soft_file_no:
|
||||
soft_file_no = hard
|
||||
|
||||
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
|
||||
logger.info("Set file limit to: %d", soft_file_no)
|
||||
|
||||
resource.setrlimit(
|
||||
resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
|
||||
)
|
||||
except (ValueError, resource.error) as e:
|
||||
logger.warn("Failed to set file or core limit: %s", e)
|
||||
|
||||
|
||||
def setup(config_options):
|
||||
"""
|
||||
Args:
|
||||
@@ -359,10 +286,9 @@ def setup(config_options):
|
||||
HomeServer
|
||||
"""
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
config = HomeServerConfig.load_or_generate_config(
|
||||
"Synapse Homeserver",
|
||||
config_options,
|
||||
generate_section="Homeserver"
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
@@ -373,12 +299,12 @@ def setup(config_options):
|
||||
# generating config files and shouldn't try to continue.
|
||||
sys.exit(0)
|
||||
|
||||
config.setup_logging()
|
||||
synapse.config.logger.setup_logging(config, use_worker_options=False)
|
||||
|
||||
# check any extra requirements we have now we have a config
|
||||
check_requirements(config)
|
||||
|
||||
version_string = get_version_string()
|
||||
version_string = "Synapse/" + get_version_string(synapse)
|
||||
|
||||
logger.info("Server hostname: %s", config.server_name)
|
||||
logger.info("Server version: %s", version_string)
|
||||
@@ -395,7 +321,6 @@ def setup(config_options):
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
content_addr=config.content_addr,
|
||||
version_string=version_string,
|
||||
database_engine=database_engine,
|
||||
)
|
||||
@@ -430,6 +355,8 @@ def setup(config_options):
|
||||
hs.get_datastore().start_doing_background_updates()
|
||||
hs.get_replication_layer().start_get_pdu_cache()
|
||||
|
||||
register_memory_metrics(hs)
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
return hs
|
||||
@@ -445,215 +372,13 @@ class SynapseService(service.Service):
|
||||
def startService(self):
|
||||
hs = setup(self.config)
|
||||
change_resource_limit(hs.config.soft_file_limit)
|
||||
if hs.config.gc_thresholds:
|
||||
gc.set_threshold(*hs.config.gc_thresholds)
|
||||
|
||||
def stopService(self):
|
||||
return self._port.stopListening()
|
||||
|
||||
|
||||
class SynapseRequest(Request):
|
||||
def __init__(self, site, *args, **kw):
|
||||
Request.__init__(self, *args, **kw)
|
||||
self.site = site
|
||||
self.authenticated_entity = None
|
||||
self.start_time = 0
|
||||
|
||||
def __repr__(self):
|
||||
# We overwrite this so that we don't log ``access_token``
|
||||
return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
self.method,
|
||||
self.get_redacted_uri(),
|
||||
self.clientproto,
|
||||
self.site.site_tag,
|
||||
)
|
||||
|
||||
def get_redacted_uri(self):
|
||||
return ACCESS_TOKEN_RE.sub(
|
||||
r'\1<redacted>\3',
|
||||
self.uri
|
||||
)
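For reference, a small self-contained sketch of how this redaction behaves; the regex and replacement are the ones defined above, while the example URI is hypothetical:

    import re

    ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')

    uri = "/_matrix/client/r0/sync?since=s72594_4483&access_token=SECRET123"
    print(ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri))
    # -> /_matrix/client/r0/sync?since=s72594_4483&access_token=<redacted>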
|
||||
|
||||
def get_user_agent(self):
|
||||
return self.requestHeaders.getRawHeaders("User-Agent", [None])[-1]
|
||||
|
||||
def started_processing(self):
|
||||
self.site.access_logger.info(
|
||||
"%s - %s - Received request: %s %s",
|
||||
self.getClientIP(),
|
||||
self.site.site_tag,
|
||||
self.method,
|
||||
self.get_redacted_uri()
|
||||
)
|
||||
self.start_time = int(time.time() * 1000)
|
||||
|
||||
def finished_processing(self):
|
||||
|
||||
try:
|
||||
context = LoggingContext.current_context()
|
||||
ru_utime, ru_stime = context.get_resource_usage()
|
||||
db_txn_count = context.db_txn_count
|
||||
db_txn_duration = context.db_txn_duration
|
||||
except:
|
||||
ru_utime, ru_stime = (0, 0)
|
||||
db_txn_count, db_txn_duration = (0, 0)
|
||||
|
||||
self.site.access_logger.info(
|
||||
"%s - %s - {%s}"
|
||||
" Processed request: %dms (%dms, %dms) (%dms/%d)"
|
||||
" %sB %s \"%s %s %s\" \"%s\"",
|
||||
self.getClientIP(),
|
||||
self.site.site_tag,
|
||||
self.authenticated_entity,
|
||||
int(time.time() * 1000) - self.start_time,
|
||||
int(ru_utime * 1000),
|
||||
int(ru_stime * 1000),
|
||||
int(db_txn_duration * 1000),
|
||||
int(db_txn_count),
|
||||
self.sentLength,
|
||||
self.code,
|
||||
self.method,
|
||||
self.get_redacted_uri(),
|
||||
self.clientproto,
|
||||
self.get_user_agent(),
|
||||
)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def processing(self):
|
||||
self.started_processing()
|
||||
yield
|
||||
self.finished_processing()
|
||||
|
||||
|
||||
class XForwardedForRequest(SynapseRequest):
|
||||
def __init__(self, *args, **kw):
|
||||
SynapseRequest.__init__(self, *args, **kw)
|
||||
|
||||
"""
|
||||
Add a layer on top of another request that only uses the value of an
|
||||
X-Forwarded-For header as the result of C{getClientIP}.
|
||||
"""
|
||||
def getClientIP(self):
|
||||
"""
|
||||
@return: The client address (the first address) in the value of the
|
||||
I{X-Forwarded-For header}. If the header is not present, return
|
||||
C{b"-"}.
|
||||
"""
|
||||
return self.requestHeaders.getRawHeaders(
|
||||
b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()
|
||||
|
||||
|
||||
class SynapseRequestFactory(object):
|
||||
def __init__(self, site, x_forwarded_for):
|
||||
self.site = site
|
||||
self.x_forwarded_for = x_forwarded_for
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
if self.x_forwarded_for:
|
||||
return XForwardedForRequest(self.site, *args, **kwargs)
|
||||
else:
|
||||
return SynapseRequest(self.site, *args, **kwargs)
|
||||
|
||||
|
||||
class SynapseSite(Site):
|
||||
"""
|
||||
Subclass of a twisted http Site that does access logging with python's
|
||||
standard logging
|
||||
"""
|
||||
def __init__(self, logger_name, site_tag, config, resource, *args, **kwargs):
|
||||
Site.__init__(self, resource, *args, **kwargs)
|
||||
|
||||
self.site_tag = site_tag
|
||||
|
||||
proxied = config.get("x_forwarded", False)
|
||||
self.requestFactory = SynapseRequestFactory(self, proxied)
|
||||
self.access_logger = logging.getLogger(logger_name)
|
||||
|
||||
def log(self, request):
|
||||
pass
|
||||
|
||||
|
||||
def create_resource_tree(desired_tree, redirect_root_to_web_client=True):
|
||||
"""Create the resource tree for this Home Server.
|
||||
|
||||
This is unduly complicated because Twisted does not support putting
|
||||
child resources more than 1 level deep at a time.
|
||||
|
||||
Args:
|
||||
web_client (bool): True to enable the web client.
|
||||
redirect_root_to_web_client (bool): True to redirect '/' to the
|
||||
location of the web client. This does nothing if web_client is not
|
||||
True.
|
||||
"""
|
||||
if redirect_root_to_web_client and WEB_CLIENT_PREFIX in desired_tree:
|
||||
root_resource = RootRedirect(WEB_CLIENT_PREFIX)
|
||||
else:
|
||||
root_resource = Resource()
|
||||
|
||||
# ideally we'd just use getChild and putChild but getChild doesn't work
|
||||
# unless you give it a Request object IN ADDITION to the name :/ So
|
||||
# instead, we'll store a copy of this mapping so we can actually add
|
||||
# extra resources to existing nodes. See self._resource_id for the key.
|
||||
resource_mappings = {}
|
||||
for full_path, res in desired_tree.items():
|
||||
logger.info("Attaching %s to path %s", res, full_path)
|
||||
last_resource = root_resource
|
||||
for path_seg in full_path.split('/')[1:-1]:
|
||||
if path_seg not in last_resource.listNames():
|
||||
# resource doesn't exist, so make a "dummy resource"
|
||||
child_resource = Resource()
|
||||
last_resource.putChild(path_seg, child_resource)
|
||||
res_id = _resource_id(last_resource, path_seg)
|
||||
resource_mappings[res_id] = child_resource
|
||||
last_resource = child_resource
|
||||
else:
|
||||
# we have an existing Resource, use that instead.
|
||||
res_id = _resource_id(last_resource, path_seg)
|
||||
last_resource = resource_mappings[res_id]
|
||||
|
||||
# ===========================
|
||||
# now attach the actual desired resource
|
||||
last_path_seg = full_path.split('/')[-1]
|
||||
|
||||
# if there is already a resource here, thieve its children and
|
||||
# replace it
|
||||
res_id = _resource_id(last_resource, last_path_seg)
|
||||
if res_id in resource_mappings:
|
||||
# there is a dummy resource at this path already, which needs
|
||||
# to be replaced with the desired resource.
|
||||
existing_dummy_resource = resource_mappings[res_id]
|
||||
for child_name in existing_dummy_resource.listNames():
|
||||
child_res_id = _resource_id(
|
||||
existing_dummy_resource, child_name
|
||||
)
|
||||
child_resource = resource_mappings[child_res_id]
|
||||
# steal the children
|
||||
res.putChild(child_name, child_resource)
|
||||
|
||||
# finally, insert the desired resource in the right place
|
||||
last_resource.putChild(last_path_seg, res)
|
||||
res_id = _resource_id(last_resource, last_path_seg)
|
||||
resource_mappings[res_id] = res
|
||||
|
||||
return root_resource
|
||||
|
||||
|
||||
def _resource_id(resource, path_seg):
|
||||
"""Construct an arbitrary resource ID so you can retrieve the mapping
|
||||
later.
|
||||
|
||||
If you want to represent resource A putChild resource B with path C,
|
||||
the mapping should look like _resource_id(A,C) = B.
|
||||
|
||||
Args:
|
||||
resource (Resource): The *parent* Resource
|
||||
path_seg (str): The name of the child Resource to be attached.
|
||||
Returns:
|
||||
str: A unique string which can be a key to the child Resource.
|
||||
"""
|
||||
return "%s-%s" % (resource, path_seg)
|
||||
|
||||
|
||||
def run(hs):
|
||||
PROFILE_SYNAPSE = False
|
||||
if PROFILE_SYNAPSE:
|
||||
@@ -679,6 +404,8 @@ def run(hs):
|
||||
|
||||
start_time = hs.get_clock().time()
|
||||
|
||||
stats = {}
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def phone_stats_home():
|
||||
logger.info("Gathering stats for reporting")
|
||||
@@ -687,7 +414,10 @@ def run(hs):
|
||||
if uptime < 0:
|
||||
uptime = 0
|
||||
|
||||
stats = {}
|
||||
# If the stats directory is empty then this is the first time we've
|
||||
# reported stats.
|
||||
first_time = not stats
|
||||
|
||||
stats["homeserver"] = hs.config.server_name
|
||||
stats["timestamp"] = now
|
||||
stats["uptime_seconds"] = uptime
|
||||
@@ -700,6 +430,25 @@ def run(hs):
|
||||
daily_messages = yield hs.get_datastore().count_daily_messages()
|
||||
if daily_messages is not None:
|
||||
stats["daily_messages"] = daily_messages
|
||||
else:
|
||||
stats.pop("daily_messages", None)
|
||||
|
||||
if first_time:
|
||||
# Add callbacks to report the synapse stats as metrics whenever
|
||||
# prometheus requests them, typically every 30s.
|
||||
# As some of the stats are expensive to calculate we only update
|
||||
# them when synapse phones home to matrix.org every 24 hours.
|
||||
metrics = get_metrics_for("synapse.usage")
|
||||
metrics.add_callback("timestamp", lambda: stats["timestamp"])
|
||||
metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
|
||||
metrics.add_callback("total_users", lambda: stats["total_users"])
|
||||
metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
|
||||
metrics.add_callback(
|
||||
"daily_active_users", lambda: stats["daily_active_users"]
|
||||
)
|
||||
metrics.add_callback(
|
||||
"daily_messages", lambda: stats.get("daily_messages", 0)
|
||||
)
|
||||
|
||||
logger.info("Reporting stats to matrix.org: %s" % (stats,))
|
||||
try:
|
||||
@@ -718,8 +467,15 @@ def run(hs):
|
||||
def in_thread():
|
||||
# Uncomment to enable tracing of log context changes.
|
||||
# sys.settrace(logcontext_tracer)
|
||||
with LoggingContext("run"):
|
||||
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
change_resource_limit(hs.config.soft_file_limit)
|
||||
if hs.config.gc_thresholds:
|
||||
gc.set_threshold(*hs.config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
if hs.config.daemonize:
|
||||
|
||||
synapse/app/media_repository.py (new file, 229 lines)
@@ -0,0 +1,229 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.client_ips import ClientIpStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.media_repository import MediaRepositoryStore
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.api.urls import (
|
||||
CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
|
||||
)
|
||||
from synapse.crypto import context_factory
|
||||
|
||||
from synapse import events
|
||||
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.media_repository")
|
||||
|
||||
|
||||
class MediaRepositorySlavedStore(
|
||||
SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
TransactionStore,
|
||||
BaseSlavedStore,
|
||||
MediaRepositoryStore,
|
||||
ClientIpStore,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class MediaRepositoryServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
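A small illustration of the cp_ filtering above, with hypothetical database arguments (cp_min and cp_max are connection pool sizes consumed by adbapi rather than by the database driver):

    db_args = {"user": "synapse", "host": "localhost", "cp_min": 5, "cp_max": 10}
    driver_args = {k: v for k, v in db_args.items() if not k.startswith("cp_")}
    # driver_args == {"user": "synapse", "host": "localhost"}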
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
elif name == "media":
|
||||
media_repo = MediaRepositoryResource(self)
|
||||
resources.update({
|
||||
MEDIA_PREFIX: media_repo,
|
||||
LEGACY_MEDIA_PREFIX: media_repo,
|
||||
CONTENT_REPO_PREFIX: ContentRepoResource(
|
||||
self, self.config.uploads_path
|
||||
),
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse media repository now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(5)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse media repository", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.media_repository"
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
tls_server_context_factory = context_factory.ServerContextFactory(config)
|
||||
|
||||
ss = MediaRepositoryServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
tls_server_context_factory=tls_server_context_factory,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
ss.get_handlers()
|
||||
ss.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ss.get_state_handler().start_caching()
|
||||
ss.get_datastore().start_profiling()
|
||||
ss.replicate()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-media-repository",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
synapse/app/pusher.py (new file, 314 lines)
@@ -0,0 +1,314 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.server import HomeServer
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.storage.roommember import RoomMemberStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage import DataStore
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, preserve_fn, \
|
||||
PreserveLoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
from synapse import events
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import gc
|
||||
|
||||
logger = logging.getLogger("synapse.app.pusher")
|
||||
|
||||
|
||||
class PusherSlaveStore(
|
||||
SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
|
||||
SlavedAccountDataStore
|
||||
):
|
||||
update_pusher_last_stream_ordering_and_success = (
|
||||
DataStore.update_pusher_last_stream_ordering_and_success.__func__
|
||||
)
|
||||
|
||||
update_pusher_failing_since = (
|
||||
DataStore.update_pusher_failing_since.__func__
|
||||
)
|
||||
|
||||
update_pusher_last_stream_ordering = (
|
||||
DataStore.update_pusher_last_stream_ordering.__func__
|
||||
)
|
||||
|
||||
get_throttle_params_by_room = (
|
||||
DataStore.get_throttle_params_by_room.__func__
|
||||
)
|
||||
|
||||
set_throttle_params = (
|
||||
DataStore.set_throttle_params.__func__
|
||||
)
|
||||
|
||||
get_time_of_last_push_action_before = (
|
||||
DataStore.get_time_of_last_push_action_before.__func__
|
||||
)
|
||||
|
||||
get_profile_displayname = (
|
||||
DataStore.get_profile_displayname.__func__
|
||||
)
|
||||
|
||||
who_forgot_in_room = (
|
||||
RoomMemberStore.__dict__["who_forgot_in_room"]
|
||||
)
|
||||
|
||||
|
||||
class PusherServer(HomeServer):
|
||||
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = PusherSlaveStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def remove_pusher(self, app_id, push_key, user_id):
|
||||
http_client = self.get_simple_http_client()
|
||||
replication_url = self.config.worker_replication_url
|
||||
url = replication_url + "/remove_pushers"
|
||||
return http_client.post_json_get_json(url, {
|
||||
"remove": [{
|
||||
"app_id": app_id,
|
||||
"push_key": push_key,
|
||||
"user_id": user_id,
|
||||
}]
|
||||
})
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse pusher now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
pusher_pool = self.get_pusherpool()
|
||||
|
||||
def stop_pusher(user_id, app_id, pushkey):
|
||||
key = "%s:%s" % (app_id, pushkey)
|
||||
pushers_for_user = pusher_pool.pushers.get(user_id, {})
|
||||
pusher = pushers_for_user.pop(key, None)
|
||||
if pusher is None:
|
||||
return
|
||||
logger.info("Stopping pusher %r / %r", user_id, key)
|
||||
pusher.on_stop()
|
||||
|
||||
def start_pusher(user_id, app_id, pushkey):
|
||||
key = "%s:%s" % (app_id, pushkey)
|
||||
logger.info("Starting pusher %r / %r", user_id, key)
|
||||
return pusher_pool._refresh_pusher(app_id, pushkey, user_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def poke_pushers(results):
|
||||
pushers_rows = set(
|
||||
map(tuple, results.get("pushers", {}).get("rows", []))
|
||||
)
|
||||
deleted_pushers_rows = set(
|
||||
map(tuple, results.get("deleted_pushers", {}).get("rows", []))
|
||||
)
|
||||
for row in sorted(pushers_rows | deleted_pushers_rows):
|
||||
if row in deleted_pushers_rows:
|
||||
user_id, app_id, pushkey = row[1:4]
|
||||
stop_pusher(user_id, app_id, pushkey)
|
||||
elif row in pushers_rows:
|
||||
user_id = row[1]
|
||||
app_id = row[5]
|
||||
pushkey = row[8]
|
||||
yield start_pusher(user_id, app_id, pushkey)
|
||||
|
||||
stream = results.get("events")
|
||||
if stream and stream["rows"]:
|
||||
min_stream_id = stream["rows"][0][0]
|
||||
max_stream_id = stream["position"]
|
||||
preserve_fn(pusher_pool.on_new_notifications)(
|
||||
min_stream_id, max_stream_id
|
||||
)
|
||||
|
||||
stream = results.get("receipts")
|
||||
if stream and stream["rows"]:
|
||||
rows = stream["rows"]
|
||||
affected_room_ids = set(row[1] for row in rows)
|
||||
min_stream_id = rows[0][0]
|
||||
max_stream_id = stream["position"]
|
||||
preserve_fn(pusher_pool.on_new_receipts)(
|
||||
min_stream_id, max_stream_id, affected_room_ids
|
||||
)
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
poke_pushers(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(30)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse pusher", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.pusher"
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
if config.start_pushers:
|
||||
sys.stderr.write(
|
||||
"\nThe pushers must be disabled in the main synapse process"
|
||||
"\nbefore they can be run in a separate worker."
|
||||
"\nPlease add ``start_pushers: false`` to the main config"
|
||||
"\n"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Force the pushers to start since they will be disabled in the main config
|
||||
config.start_pushers = True
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
ps = PusherServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ps.setup()
|
||||
ps.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ps.replicate()
|
||||
ps.get_pusherpool().start()
|
||||
ps.get_datastore().start_profiling()
|
||||
ps.get_state_handler().start_caching()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-pusher",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
ps = start(sys.argv[1:])
|
||||
synapse/app/synchrotron.py (new file, 538 lines)
@@ -0,0 +1,538 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import synapse
|
||||
|
||||
from synapse.api.constants import EventTypes, PresenceState
|
||||
from synapse.config._base import ConfigError
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.config.logger import setup_logging
|
||||
from synapse.handlers.presence import PresenceHandler
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.rest.client.v2_alpha import sync
|
||||
from synapse.rest.client.v1 import events
|
||||
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
|
||||
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
|
||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
|
||||
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
|
||||
from synapse.replication.slave.storage.presence import SlavedPresenceStore
|
||||
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
|
||||
from synapse.replication.slave.storage.devices import SlavedDeviceStore
|
||||
from synapse.replication.slave.storage.room import RoomStore
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.client_ips import ClientIpStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.presence import PresenceStore, UserPresenceState
|
||||
from synapse.storage.roommember import RoomMemberStore
|
||||
from synapse.util.async import sleep
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, preserve_fn, \
|
||||
PreserveLoggingContext
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.stringutils import random_string
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import contextlib
|
||||
import gc
|
||||
import ujson as json
|
||||
|
||||
logger = logging.getLogger("synapse.app.synchrotron")
|
||||
|
||||
|
||||
class SynchrotronSlavedStore(
|
||||
SlavedPushRuleStore,
|
||||
SlavedEventStore,
|
||||
SlavedReceiptsStore,
|
||||
SlavedAccountDataStore,
|
||||
SlavedApplicationServiceStore,
|
||||
SlavedRegistrationStore,
|
||||
SlavedFilteringStore,
|
||||
SlavedPresenceStore,
|
||||
SlavedDeviceInboxStore,
|
||||
SlavedDeviceStore,
|
||||
RoomStore,
|
||||
BaseSlavedStore,
|
||||
ClientIpStore, # After BaseSlavedStore because the constructor is different
|
||||
):
|
||||
who_forgot_in_room = (
|
||||
RoomMemberStore.__dict__["who_forgot_in_room"]
|
||||
)
|
||||
|
||||
did_forget = (
|
||||
RoomMemberStore.__dict__["did_forget"]
|
||||
)
|
||||
|
||||
# XXX: This is a bit broken because we don't persist the accepted list in a
|
||||
# way that can be replicated. This means that we don't have a way to
|
||||
# invalidate the cache correctly.
|
||||
get_presence_list_accepted = PresenceStore.__dict__[
|
||||
"get_presence_list_accepted"
|
||||
]
|
||||
get_presence_list_observers_accepted = PresenceStore.__dict__[
|
||||
"get_presence_list_observers_accepted"
|
||||
]
|
||||
|
||||
|
||||
UPDATE_SYNCING_USERS_MS = 10 * 1000
|
||||
|
||||
|
||||
class SynchrotronPresence(object):
|
||||
def __init__(self, hs):
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
self.http_client = hs.get_simple_http_client()
|
||||
self.store = hs.get_datastore()
|
||||
self.user_to_num_current_syncs = {}
|
||||
self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
|
||||
self.clock = hs.get_clock()
|
||||
self.notifier = hs.get_notifier()
|
||||
|
||||
active_presence = self.store.take_presence_startup_info()
|
||||
self.user_to_current_state = {
|
||||
state.user_id: state
|
||||
for state in active_presence
|
||||
}
|
||||
|
||||
self.process_id = random_string(16)
|
||||
logger.info("Presence process_id is %r", self.process_id)
|
||||
|
||||
self._sending_sync = False
|
||||
self._need_to_send_sync = False
|
||||
self.clock.looping_call(
|
||||
self._send_syncing_users_regularly,
|
||||
UPDATE_SYNCING_USERS_MS,
|
||||
)
|
||||
|
||||
reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
|
||||
|
||||
def set_state(self, user, state, ignore_status_msg=False):
|
||||
# TODO How's this supposed to work?
|
||||
pass
|
||||
|
||||
get_states = PresenceHandler.get_states.__func__
|
||||
get_state = PresenceHandler.get_state.__func__
|
||||
_get_interested_parties = PresenceHandler._get_interested_parties.__func__
|
||||
current_state_for_users = PresenceHandler.current_state_for_users.__func__
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def user_syncing(self, user_id, affect_presence):
|
||||
if affect_presence:
|
||||
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
|
||||
self.user_to_num_current_syncs[user_id] = curr_sync + 1
|
||||
prev_states = yield self.current_state_for_users([user_id])
|
||||
if prev_states[user_id].state == PresenceState.OFFLINE:
|
||||
# TODO: Don't block the sync request on this HTTP hit.
|
||||
yield self._send_syncing_users_now()
|
||||
|
||||
def _end():
|
||||
# We check that the user_id is in user_to_num_current_syncs because
|
||||
# user_to_num_current_syncs may have been cleared if we are
|
||||
# shutting down.
|
||||
if affect_presence and user_id in self.user_to_num_current_syncs:
|
||||
self.user_to_num_current_syncs[user_id] -= 1
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _user_syncing():
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
_end()
|
||||
|
||||
defer.returnValue(_user_syncing())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _on_shutdown(self):
|
||||
# When the synchrotron is shutdown tell the master to clear the in
|
||||
# progress syncs for this process
|
||||
self.user_to_num_current_syncs.clear()
|
||||
yield self._send_syncing_users_now()
|
||||
|
||||
def _send_syncing_users_regularly(self):
|
||||
# Only send an update if we aren't in the middle of sending one.
|
||||
if not self._sending_sync:
|
||||
preserve_fn(self._send_syncing_users_now)()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _send_syncing_users_now(self):
|
||||
if self._sending_sync:
|
||||
# We don't want to race with sending another update.
|
||||
# Instead we wait for that update to finish and send another
|
||||
# update afterwards.
|
||||
self._need_to_send_sync = True
|
||||
return
|
||||
|
||||
# Flag that we are sending an update.
|
||||
self._sending_sync = True
|
||||
|
||||
yield self.http_client.post_json_get_json(self.syncing_users_url, {
|
||||
"process_id": self.process_id,
|
||||
"syncing_users": [
|
||||
user_id for user_id, count in self.user_to_num_current_syncs.items()
|
||||
if count > 0
|
||||
],
|
||||
})
|
||||
|
||||
# Unset the flag as we are no longer sending an update.
|
||||
self._sending_sync = False
|
||||
if self._need_to_send_sync:
|
||||
# If something happened while we were sending the update then
|
||||
# we might need to send another update.
|
||||
# TODO: Check if the update that was sent matches the current state
|
||||
# as we only need to send an update if they are different.
|
||||
self._need_to_send_sync = False
|
||||
yield self._send_syncing_users_now()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def notify_from_replication(self, states, stream_id):
|
||||
parties = yield self._get_interested_parties(
|
||||
states, calculate_remote_hosts=False
|
||||
)
|
||||
room_ids_to_states, users_to_states, _ = parties
|
||||
|
||||
self.notifier.on_new_event(
|
||||
"presence_key", stream_id, rooms=room_ids_to_states.keys(),
|
||||
users=users_to_states.keys()
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_replication(self, result):
|
||||
stream = result.get("presence", {"rows": []})
|
||||
states = []
|
||||
for row in stream["rows"]:
|
||||
(
|
||||
position, user_id, state, last_active_ts,
|
||||
last_federation_update_ts, last_user_sync_ts, status_msg,
|
||||
currently_active
|
||||
) = row
|
||||
state = UserPresenceState(
|
||||
user_id, state, last_active_ts,
|
||||
last_federation_update_ts, last_user_sync_ts, status_msg,
|
||||
currently_active
|
||||
)
|
||||
self.user_to_current_state[user_id] = state
|
||||
states.append(state)
|
||||
|
||||
if states and "position" in stream:
|
||||
stream_id = int(stream["position"])
|
||||
yield self.notify_from_replication(states, stream_id)
|
||||
|
||||
|
||||
class SynchrotronTyping(object):
|
||||
def __init__(self, hs):
|
||||
self._latest_room_serial = 0
|
||||
self._room_serials = {}
|
||||
self._room_typing = {}
|
||||
|
||||
def stream_positions(self):
|
||||
# We must update this typing token from the response of the previous
|
||||
# sync. In particular, the stream id may "reset" back to zero/a low
|
||||
# value which we *must* use for the next replication request.
|
||||
return {"typing": self._latest_room_serial}
|
||||
|
||||
def process_replication(self, result):
|
||||
stream = result.get("typing")
|
||||
if stream:
|
||||
self._latest_room_serial = int(stream["position"])
|
||||
|
||||
for row in stream["rows"]:
|
||||
position, room_id, typing_json = row
|
||||
typing = json.loads(typing_json)
|
||||
self._room_serials[room_id] = position
|
||||
self._room_typing[room_id] = typing
|
||||
|
||||
|
||||
class SynchrotronApplicationService(object):
|
||||
def notify_interested_services(self, event):
|
||||
pass
|
||||
|
||||
|
||||
class SynchrotronServer(HomeServer):
|
||||
def get_db_conn(self, run_new_connection=True):
|
||||
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||
# not be passed to the database engine.
|
||||
db_params = {
|
||||
k: v for k, v in self.db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
db_conn = self.database_engine.module.connect(**db_params)
|
||||
|
||||
if run_new_connection:
|
||||
self.database_engine.on_new_connection(db_conn)
|
||||
return db_conn
|
||||
|
||||
def setup(self):
|
||||
logger.info("Setting up.")
|
||||
self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
|
||||
logger.info("Finished setting up.")
|
||||
|
||||
def _listen_http(self, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_addresses = listener_config["bind_addresses"]
|
||||
site_tag = listener_config.get("tag", port)
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "metrics":
|
||||
resources[METRICS_PREFIX] = MetricsResource(self)
|
||||
elif name == "client":
|
||||
resource = JsonResource(self, canonical_json=False)
|
||||
sync.register_servlets(self, resource)
|
||||
events.register_servlets(self, resource)
|
||||
InitialSyncRestServlet(self).register(resource)
|
||||
RoomInitialSyncRestServlet(self).register(resource)
|
||||
resources.update({
|
||||
"/_matrix/client/r0": resource,
|
||||
"/_matrix/client/unstable": resource,
|
||||
"/_matrix/client/v2_alpha": resource,
|
||||
"/_matrix/client/api/v1": resource,
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
port,
|
||||
SynapseSite(
|
||||
"synapse.access.http.%s" % (site_tag,),
|
||||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
|
||||
logger.info("Synapse synchrotron now listening on port %d", port)
|
||||
|
||||
def start_listening(self, listeners):
|
||||
for listener in listeners:
|
||||
if listener["type"] == "http":
|
||||
self._listen_http(listener)
|
||||
elif listener["type"] == "manhole":
|
||||
bind_addresses = listener["bind_addresses"]
|
||||
|
||||
for address in bind_addresses:
|
||||
reactor.listenTCP(
|
||||
listener["port"],
|
||||
manhole(
|
||||
username="matrix",
|
||||
password="rabbithole",
|
||||
globals={"hs": self},
|
||||
),
|
||||
interface=address
|
||||
)
|
||||
else:
|
||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def replicate(self):
|
||||
http_client = self.get_simple_http_client()
|
||||
store = self.get_datastore()
|
||||
replication_url = self.config.worker_replication_url
|
||||
notifier = self.get_notifier()
|
||||
presence_handler = self.get_presence_handler()
|
||||
typing_handler = self.get_typing_handler()
|
||||
|
||||
def notify_from_stream(
|
||||
result, stream_name, stream_key, room=None, user=None
|
||||
):
|
||||
stream = result.get(stream_name)
|
||||
if stream:
|
||||
position_index = stream["field_names"].index("position")
|
||||
if room:
|
||||
room_index = stream["field_names"].index(room)
|
||||
if user:
|
||||
user_index = stream["field_names"].index(user)
|
||||
|
||||
users = ()
|
||||
rooms = ()
|
||||
for row in stream["rows"]:
|
||||
position = row[position_index]
|
||||
|
||||
if user:
|
||||
users = (row[user_index],)
|
||||
|
||||
if room:
|
||||
rooms = (row[room_index],)
|
||||
|
||||
notifier.on_new_event(
|
||||
stream_key, position, users=users, rooms=rooms
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def notify_device_list_update(result):
|
||||
stream = result.get("device_lists")
|
||||
if not stream:
|
||||
return
|
||||
|
||||
position_index = stream["field_names"].index("position")
|
||||
user_index = stream["field_names"].index("user_id")
|
||||
|
||||
for row in stream["rows"]:
|
||||
position = row[position_index]
|
||||
user_id = row[user_index]
|
||||
|
||||
room_ids = yield store.get_rooms_for_user(user_id)
|
||||
|
||||
notifier.on_new_event(
|
||||
"device_list_key", position, rooms=room_ids,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def notify(result):
|
||||
stream = result.get("events")
|
||||
if stream:
|
||||
max_position = stream["position"]
|
||||
|
||||
event_map = yield store.get_events([row[1] for row in stream["rows"]])
|
||||
|
||||
for row in stream["rows"]:
|
||||
position = row[0]
|
||||
event_id = row[1]
|
||||
event = event_map.get(event_id, None)
|
||||
if not event:
|
||||
continue
|
||||
|
||||
extra_users = ()
|
||||
if event.type == EventTypes.Member:
|
||||
extra_users = (event.state_key,)
|
||||
notifier.on_new_room_event(
|
||||
event, position, max_position, extra_users
|
||||
)
|
||||
|
||||
notify_from_stream(
|
||||
result, "push_rules", "push_rules_key", user="user_id"
|
||||
)
|
||||
notify_from_stream(
|
||||
result, "user_account_data", "account_data_key", user="user_id"
|
||||
)
|
||||
notify_from_stream(
|
||||
result, "room_account_data", "account_data_key", user="user_id"
|
||||
)
|
||||
notify_from_stream(
|
||||
result, "tag_account_data", "account_data_key", user="user_id"
|
||||
)
|
||||
notify_from_stream(
|
||||
result, "receipts", "receipt_key", room="room_id"
|
||||
)
|
||||
notify_from_stream(
|
||||
result, "typing", "typing_key", room="room_id"
|
||||
)
|
||||
notify_from_stream(
|
||||
result, "to_device", "to_device_key", user="user_id"
|
||||
)
|
||||
yield notify_device_list_update(result)
|
||||
|
||||
while True:
|
||||
try:
|
||||
args = store.stream_positions()
|
||||
args.update(typing_handler.stream_positions())
|
||||
args["timeout"] = 30000
|
||||
result = yield http_client.get_json(replication_url, args=args)
|
||||
yield store.process_replication(result)
|
||||
typing_handler.process_replication(result)
|
||||
yield presence_handler.process_replication(result)
|
||||
yield notify(result)
|
||||
except:
|
||||
logger.exception("Error replicating from %r", replication_url)
|
||||
yield sleep(5)
|
||||
|
||||
def build_presence_handler(self):
|
||||
return SynchrotronPresence(self)
|
||||
|
||||
def build_typing_handler(self):
|
||||
return SynchrotronTyping(self)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
try:
|
||||
config = HomeServerConfig.load_config(
|
||||
"Synapse synchrotron", config_options
|
||||
)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + e.message + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
assert config.worker_app == "synapse.app.synchrotron"
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
ss = SynchrotronServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
application_service_handler=SynchrotronApplicationService(),
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
ss.start_listening(config.worker_listeners)
|
||||
|
||||
def run():
|
||||
# make sure that we run the reactor with the sentinel log context,
|
||||
# otherwise other PreserveLoggingContext instances will get confused
|
||||
# and complain when they see the logcontext arbitrarily swapping
|
||||
# between the sentinel and `run` logcontexts.
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
change_resource_limit(config.soft_file_limit)
|
||||
if config.gc_thresholds:
|
||||
gc.set_threshold(*config.gc_thresholds)
|
||||
reactor.run()
|
||||
|
||||
def start():
|
||||
ss.get_datastore().start_profiling()
|
||||
ss.replicate()
|
||||
ss.get_state_handler().start_caching()
|
||||
|
||||
reactor.callWhenRunning(start)
|
||||
|
||||
if config.worker_daemonize:
|
||||
daemon = Daemonize(
|
||||
app="synapse-synchrotron",
|
||||
pid=config.worker_pid_file,
|
||||
action=run,
|
||||
auto_close_fds=False,
|
||||
verbose=True,
|
||||
logger=logger,
|
||||
)
|
||||
daemon.start()
|
||||
else:
|
||||
run()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
||||
@@ -14,71 +14,238 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
import collections
|
||||
import glob
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import yaml
|
||||
import errno
|
||||
import time
|
||||
|
||||
SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
|
||||
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
|
||||
|
||||
GREEN = "\x1b[1;32m"
|
||||
YELLOW = "\x1b[1;33m"
|
||||
RED = "\x1b[1;31m"
|
||||
NORMAL = "\x1b[m"
|
||||
|
||||
|
||||
def pid_running(pid):
    try:
        os.kill(pid, 0)
        return True
    except OSError, err:
        if err.errno == errno.EPERM:
            return True
        return False


def write(message, colour=NORMAL, stream=sys.stdout):
    if colour == NORMAL:
        stream.write(message + "\n")
    else:
        stream.write(colour + message + NORMAL + "\n")


def abort(message, colour=RED, stream=sys.stderr):
    write(message, colour, stream)
    sys.exit(1)
|
||||
|
||||
|
||||
def start(configfile):
|
||||
print ("Starting ...")
|
||||
write("Starting ...")
|
||||
args = SYNAPSE
|
||||
args.extend(["--daemonize", "-c", configfile])
|
||||
|
||||
try:
|
||||
subprocess.check_call(args)
|
||||
print (GREEN + "started" + NORMAL)
|
||||
write("started synapse.app.homeserver(%r)" %
|
||||
(configfile,), colour=GREEN)
|
||||
except subprocess.CalledProcessError as e:
|
||||
print (
|
||||
RED +
|
||||
"error starting (exit code: %d); see above for logs" % e.returncode +
|
||||
NORMAL
|
||||
write(
|
||||
"error starting (exit code: %d); see above for logs" % e.returncode,
|
||||
colour=RED,
|
||||
)
|
||||
|
||||
|
||||
def stop(pidfile):
|
||||
def start_worker(app, configfile, worker_configfile):
    args = [
        "python", "-B",
        "-m", app,
        "-c", configfile,
        "-c", worker_configfile
    ]

    try:
        subprocess.check_call(args)
        write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
    except subprocess.CalledProcessError as e:
        write(
            "error starting %s(%r) (exit code: %d); see above for logs" % (
                app, worker_configfile, e.returncode,
            ),
            colour=RED,
        )
|
||||
|
||||
|
||||
def stop(pidfile, app):
|
||||
if os.path.exists(pidfile):
|
||||
pid = int(open(pidfile).read())
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
print (GREEN + "stopped" + NORMAL)
|
||||
try:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
write("stopped %s" % (app,), colour=GREEN)
|
||||
except OSError, err:
|
||||
if err.errno == errno.ESRCH:
|
||||
write("%s not running" % (app,), colour=YELLOW)
|
||||
elif err.errno == errno.EPERM:
|
||||
abort("Cannot stop %s: Operation not permitted" % (app,))
|
||||
else:
|
||||
abort("Cannot stop %s: Unknown error" % (app,))
|
||||
|
||||
|
||||
Worker = collections.namedtuple("Worker", [
    "app", "configfile", "pidfile", "cache_factor"
])
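# Illustrative sketch only (not part of the diff): how a parsed worker config
# maps onto the Worker tuple above. The field names ("worker_app",
# "worker_pid_file", "synctl_cache_factor") come from the loop in main()
# below; the example values and file path are invented.
import collections

Worker = collections.namedtuple("Worker", [
    "app", "configfile", "pidfile", "cache_factor"
])

worker_config = {
    "worker_app": "synapse.app.synchrotron",
    "worker_pid_file": "/var/run/synchrotron.pid",
    "synctl_cache_factor": 2.0,
}

worker = Worker(
    worker_config["worker_app"],
    "synchrotron.yaml",  # path of the worker config file itself (invented)
    worker_config["worker_pid_file"],
    worker_config.get("synctl_cache_factor"),
)
assert worker.app == "synapse.app.synchrotron"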
|
||||
|
||||
|
||||
def main():
|
||||
configfile = sys.argv[2] if len(sys.argv) == 3 else "homeserver.yaml"
|
||||
|
||||
if not os.path.exists(configfile):
|
||||
sys.stderr.write(
|
||||
"No config file found\n"
|
||||
"To generate a config file, run '%s -c %s --generate-config"
|
||||
" --server-name=<server name>'\n" % (
|
||||
" ".join(SYNAPSE), configfile
|
||||
)
|
||||
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "action",
        choices=["start", "stop", "restart"],
        help="whether to start, stop or restart the synapse",
    )
    parser.add_argument(
        "configfile",
        nargs="?",
        default="homeserver.yaml",
        help="the homeserver config file, defaults to homeserver.yaml",
    )
    parser.add_argument(
        "-w", "--worker",
        metavar="WORKERCONFIG",
        help="start or stop a single worker",
    )
    parser.add_argument(
        "-a", "--all-processes",
        metavar="WORKERCONFIGDIR",
        help="start or stop all the workers in the given directory"
             " and the main synapse process",
    )

    options = parser.parse_args()

    if options.worker and options.all_processes:
        write(
            'Cannot use "--worker" with "--all-processes"',
            stream=sys.stderr
        )
        sys.exit(1)
|
||||
|
||||
config = yaml.load(open(configfile))
|
||||
pidfile = config["pid_file"]
|
||||
configfile = options.configfile
|
||||
|
||||
action = sys.argv[1] if sys.argv[1:] else "usage"
|
||||
if action == "start":
|
||||
start(configfile)
|
||||
elif action == "stop":
|
||||
stop(pidfile)
|
||||
elif action == "restart":
|
||||
stop(pidfile)
|
||||
start(configfile)
|
||||
else:
|
||||
sys.stderr.write("Usage: %s [start|stop|restart] [configfile]\n" % (sys.argv[0],))
|
||||
if not os.path.exists(configfile):
|
||||
write(
|
||||
"No config file found\n"
|
||||
"To generate a config file, run '%s -c %s --generate-config"
|
||||
" --server-name=<server name>'\n" % (
|
||||
" ".join(SYNAPSE), options.configfile
|
||||
),
|
||||
stream=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
with open(configfile) as stream:
|
||||
config = yaml.load(stream)
|
||||
|
||||
pidfile = config["pid_file"]
|
||||
cache_factor = config.get("synctl_cache_factor")
|
||||
start_stop_synapse = True
|
||||
|
||||
if cache_factor:
|
||||
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
|
||||
|
||||
worker_configfiles = []
|
||||
if options.worker:
|
||||
start_stop_synapse = False
|
||||
worker_configfile = options.worker
|
||||
if not os.path.exists(worker_configfile):
|
||||
write(
|
||||
"No worker config found at %r" % (worker_configfile,),
|
||||
stream=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
worker_configfiles.append(worker_configfile)
|
||||
|
||||
if options.all_processes:
|
||||
worker_configdir = options.all_processes
|
||||
if not os.path.isdir(worker_configdir):
|
||||
write(
|
||||
"No worker config directory found at %r" % (worker_configdir,),
|
||||
stream=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
worker_configfiles.extend(sorted(glob.glob(
|
||||
os.path.join(worker_configdir, "*.yaml")
|
||||
)))
|
||||
|
||||
workers = []
|
||||
for worker_configfile in worker_configfiles:
|
||||
with open(worker_configfile) as stream:
|
||||
worker_config = yaml.load(stream)
|
||||
worker_app = worker_config["worker_app"]
|
||||
worker_pidfile = worker_config["worker_pid_file"]
|
||||
worker_daemonize = worker_config["worker_daemonize"]
|
||||
assert worker_daemonize # TODO print something more user friendly
|
||||
worker_cache_factor = worker_config.get("synctl_cache_factor")
|
||||
workers.append(Worker(
|
||||
worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
|
||||
))
|
||||
|
||||
action = options.action
|
||||
|
||||
if action == "stop" or action == "restart":
|
||||
for worker in workers:
|
||||
stop(worker.pidfile, worker.app)
|
||||
|
||||
if start_stop_synapse:
|
||||
stop(pidfile, "synapse.app.homeserver")
|
||||
|
||||
# Wait for synapse to actually shutdown before starting it again
|
||||
if action == "restart":
|
||||
running_pids = []
|
||||
if start_stop_synapse and os.path.exists(pidfile):
|
||||
running_pids.append(int(open(pidfile).read()))
|
||||
for worker in workers:
|
||||
if os.path.exists(worker.pidfile):
|
||||
running_pids.append(int(open(worker.pidfile).read()))
|
||||
if len(running_pids) > 0:
|
||||
write("Waiting for process to exit before restarting...")
|
||||
for running_pid in running_pids:
|
||||
while pid_running(running_pid):
|
||||
time.sleep(0.2)
|
||||
|
||||
if action == "start" or action == "restart":
|
||||
if start_stop_synapse:
|
||||
start(configfile)
|
||||
|
||||
for worker in workers:
|
||||
if worker.cache_factor:
|
||||
os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
|
||||
|
||||
start_worker(worker.app, configfile, worker.configfile)
|
||||
|
||||
if cache_factor:
|
||||
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
|
||||
else:
|
||||
os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -13,6 +13,9 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from synapse.api.constants import EventTypes
|
||||
from synapse.util.caches.descriptors import cachedInlineCallbacks
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
import logging
|
||||
import re
|
||||
@@ -79,7 +82,7 @@ class ApplicationService(object):
|
||||
NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]
|
||||
|
||||
def __init__(self, token, url=None, namespaces=None, hs_token=None,
|
||||
sender=None, id=None):
|
||||
sender=None, id=None, protocols=None, rate_limited=True):
|
||||
self.token = token
|
||||
self.url = url
|
||||
self.hs_token = hs_token
|
||||
@@ -87,6 +90,17 @@ class ApplicationService(object):
|
||||
self.namespaces = self._check_namespaces(namespaces)
|
||||
self.id = id
|
||||
|
||||
if "|" in self.id:
|
||||
raise Exception("application service ID cannot contain '|' character")
|
||||
|
||||
# .protocols is a publicly visible field
|
||||
if protocols:
|
||||
self.protocols = set(protocols)
|
||||
else:
|
||||
self.protocols = set()
|
||||
|
||||
self.rate_limited = rate_limited
|
||||
|
||||
def _check_namespaces(self, namespaces):
|
||||
# Sanity check that it is of the form:
|
||||
# {
|
||||
@@ -111,92 +125,94 @@ class ApplicationService(object):
|
||||
raise ValueError(
|
||||
"Expected bool for 'exclusive' in ns '%s'" % ns
|
||||
)
|
||||
if not isinstance(regex_obj.get("regex"), basestring):
|
||||
regex = regex_obj.get("regex")
|
||||
if isinstance(regex, basestring):
|
||||
regex_obj["regex"] = re.compile(regex) # Pre-compile regex
|
||||
else:
|
||||
raise ValueError(
|
||||
"Expected string for 'regex' in ns '%s'" % ns
|
||||
)
|
||||
return namespaces
|
||||
|
||||
def _matches_regex(self, test_string, namespace_key, return_obj=False):
|
||||
if not isinstance(test_string, basestring):
|
||||
logger.error(
|
||||
"Expected a string to test regex against, but got %s",
|
||||
test_string
|
||||
)
|
||||
return False
|
||||
|
||||
def _matches_regex(self, test_string, namespace_key):
|
||||
for regex_obj in self.namespaces[namespace_key]:
|
||||
if re.match(regex_obj["regex"], test_string):
|
||||
if return_obj:
|
||||
return regex_obj
|
||||
return True
|
||||
return False
|
||||
if regex_obj["regex"].match(test_string):
|
||||
return regex_obj
|
||||
return None
|
||||
|
||||
def _is_exclusive(self, ns_key, test_string):
|
||||
regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
|
||||
regex_obj = self._matches_regex(test_string, ns_key)
|
||||
if regex_obj:
|
||||
return regex_obj["exclusive"]
|
||||
return False
|
||||
|
||||
def _matches_user(self, event, member_list):
|
||||
if (hasattr(event, "sender") and
|
||||
self.is_interested_in_user(event.sender)):
|
||||
return True
|
||||
@defer.inlineCallbacks
|
||||
def _matches_user(self, event, store):
|
||||
if not event:
|
||||
defer.returnValue(False)
|
||||
|
||||
if self.is_interested_in_user(event.sender):
|
||||
defer.returnValue(True)
|
||||
# also check m.room.member state key
|
||||
if (hasattr(event, "type") and event.type == EventTypes.Member
|
||||
and hasattr(event, "state_key")
|
||||
and self.is_interested_in_user(event.state_key)):
|
||||
return True
|
||||
if (event.type == EventTypes.Member and
|
||||
self.is_interested_in_user(event.state_key)):
|
||||
defer.returnValue(True)
|
||||
|
||||
if not store:
|
||||
defer.returnValue(False)
|
||||
|
||||
does_match = yield self._matches_user_in_member_list(event.room_id, store)
|
||||
defer.returnValue(does_match)
|
||||
|
||||
@cachedInlineCallbacks(num_args=1, cache_context=True)
|
||||
def _matches_user_in_member_list(self, room_id, store, cache_context):
|
||||
member_list = yield store.get_users_in_room(
|
||||
room_id, on_invalidate=cache_context.invalidate
|
||||
)
|
||||
|
||||
# check joined member events
|
||||
for user_id in member_list:
|
||||
if self.is_interested_in_user(user_id):
|
||||
return True
|
||||
return False
|
||||
defer.returnValue(True)
|
||||
defer.returnValue(False)
|
||||
|
||||
def _matches_room_id(self, event):
|
||||
if hasattr(event, "room_id"):
|
||||
return self.is_interested_in_room(event.room_id)
|
||||
return False
|
||||
|
||||
def _matches_aliases(self, event, alias_list):
|
||||
@defer.inlineCallbacks
|
||||
def _matches_aliases(self, event, store):
|
||||
if not store or not event:
|
||||
defer.returnValue(False)
|
||||
|
||||
alias_list = yield store.get_aliases_for_room(event.room_id)
|
||||
for alias in alias_list:
|
||||
if self.is_interested_in_alias(alias):
|
||||
return True
|
||||
return False
|
||||
defer.returnValue(True)
|
||||
defer.returnValue(False)
|
||||
|
||||
def is_interested(self, event, restrict_to=None, aliases_for_event=None,
|
||||
member_list=None):
|
||||
@defer.inlineCallbacks
|
||||
def is_interested(self, event, store=None):
|
||||
"""Check if this service is interested in this event.
|
||||
|
||||
Args:
|
||||
event(Event): The event to check.
|
||||
restrict_to(str): The namespace to restrict regex tests to.
|
||||
aliases_for_event(list): A list of all the known room aliases for
|
||||
this event.
|
||||
member_list(list): A list of all joined user_ids in this room.
|
||||
store(DataStore)
|
||||
Returns:
|
||||
bool: True if this service would like to know about this event.
|
||||
"""
|
||||
if aliases_for_event is None:
|
||||
aliases_for_event = []
|
||||
if member_list is None:
|
||||
member_list = []
|
||||
# Do cheap checks first
|
||||
if self._matches_room_id(event):
|
||||
defer.returnValue(True)
|
||||
|
||||
if restrict_to and restrict_to not in ApplicationService.NS_LIST:
|
||||
# this is a programming error, so fail early and raise a general
|
||||
# exception
|
||||
raise Exception("Unexpected restrict_to value: %s". restrict_to)
|
||||
if (yield self._matches_aliases(event, store)):
|
||||
defer.returnValue(True)
|
||||
|
||||
if not restrict_to:
|
||||
return (self._matches_user(event, member_list)
|
||||
or self._matches_aliases(event, aliases_for_event)
|
||||
or self._matches_room_id(event))
|
||||
elif restrict_to == ApplicationService.NS_ALIASES:
|
||||
return self._matches_aliases(event, aliases_for_event)
|
||||
elif restrict_to == ApplicationService.NS_ROOMS:
|
||||
return self._matches_room_id(event)
|
||||
elif restrict_to == ApplicationService.NS_USERS:
|
||||
return self._matches_user(event, member_list)
|
||||
if (yield self._matches_user(event, store)):
|
||||
defer.returnValue(True)
|
||||
|
||||
defer.returnValue(False)
|
||||
|
||||
def is_interested_in_user(self, user_id):
|
||||
return (
|
||||
@@ -205,10 +221,10 @@ class ApplicationService(object):
|
||||
)
|
||||
|
||||
def is_interested_in_alias(self, alias):
|
||||
return self._matches_regex(alias, ApplicationService.NS_ALIASES)
|
||||
return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES))
|
||||
|
||||
def is_interested_in_room(self, room_id):
|
||||
return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
|
||||
return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS))
|
||||
|
||||
def is_exclusive_user(self, user_id):
|
||||
return (
|
||||
@@ -216,11 +232,17 @@ class ApplicationService(object):
|
||||
or user_id == self.sender
|
||||
)
|
||||
|
||||
def is_interested_in_protocol(self, protocol):
|
||||
return protocol in self.protocols
|
||||
|
||||
def is_exclusive_alias(self, alias):
|
||||
return self._is_exclusive(ApplicationService.NS_ALIASES, alias)
|
||||
|
||||
def is_exclusive_room(self, room_id):
|
||||
return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
|
||||
|
||||
def is_rate_limited(self):
|
||||
return self.rate_limited
|
||||
|
||||
def __str__(self):
|
||||
return "ApplicationService: %s" % (self.__dict__,)
|
||||
|
||||
@@ -14,9 +14,12 @@
|
||||
# limitations under the License.
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.constants import ThirdPartyEntityKind
|
||||
from synapse.api.errors import CodeMessageException
|
||||
from synapse.http.client import SimpleHttpClient
|
||||
from synapse.events.utils import serialize_event
|
||||
from synapse.util.caches.response_cache import ResponseCache
|
||||
from synapse.types import ThirdPartyInstanceID
|
||||
|
||||
import logging
|
||||
import urllib
|
||||
@@ -24,6 +27,42 @@ import urllib
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
HOUR_IN_MS = 60 * 60 * 1000
|
||||
|
||||
|
||||
APP_SERVICE_PREFIX = "/_matrix/app/unstable"
|
||||
|
||||
|
||||
def _is_valid_3pe_metadata(info):
    if "instances" not in info:
        return False
    if not isinstance(info["instances"], list):
        return False
    return True


def _is_valid_3pe_result(r, field):
    if not isinstance(r, dict):
        return False

    for k in (field, "protocol"):
        if k not in r:
            return False
        if not isinstance(r[k], str):
            return False

    if "fields" not in r:
        return False
    fields = r["fields"]
    if not isinstance(fields, dict):
        return False
    for k in fields.keys():
        if not isinstance(fields[k], str):
            return False

    return True
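# Illustrative sketch only: the shape of response entry _is_valid_3pe_result()
# above accepts. The protocol and field values are invented examples, not part
# of the diff.
valid_entry = {
    "userid": "@irc_alice:example.org",
    "protocol": "irc",
    "fields": {"nickname": "alice"},
}
invalid_entry = {"protocol": "irc"}  # missing the required lookup field

assert _is_valid_3pe_result(valid_entry, field="userid")
assert not _is_valid_3pe_result(invalid_entry, field="userid")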
|
||||
|
||||
|
||||
class ApplicationServiceApi(SimpleHttpClient):
|
||||
"""This class manages HS -> AS communications, including querying and
|
||||
pushing.
|
||||
@@ -33,8 +72,12 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
super(ApplicationServiceApi, self).__init__(hs)
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_user(self, service, user_id):
|
||||
if service.url is None:
|
||||
defer.returnValue(False)
|
||||
uri = service.url + ("/users/%s" % urllib.quote(user_id))
|
||||
response = None
|
||||
try:
|
||||
@@ -54,6 +97,8 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_alias(self, service, alias):
|
||||
if service.url is None:
|
||||
defer.returnValue(False)
|
||||
uri = service.url + ("/rooms/%s" % urllib.quote(alias))
|
||||
response = None
|
||||
try:
|
||||
@@ -71,8 +116,91 @@ class ApplicationServiceApi(SimpleHttpClient):
|
||||
logger.warning("query_alias to %s threw exception %s", uri, ex)
|
||||
defer.returnValue(False)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_3pe(self, service, kind, protocol, fields):
|
||||
if kind == ThirdPartyEntityKind.USER:
|
||||
required_field = "userid"
|
||||
elif kind == ThirdPartyEntityKind.LOCATION:
|
||||
required_field = "alias"
|
||||
else:
|
||||
raise ValueError(
|
||||
"Unrecognised 'kind' argument %r to query_3pe()", kind
|
||||
)
|
||||
if service.url is None:
|
||||
defer.returnValue([])
|
||||
|
||||
uri = "%s%s/thirdparty/%s/%s" % (
|
||||
service.url,
|
||||
APP_SERVICE_PREFIX,
|
||||
kind,
|
||||
urllib.quote(protocol)
|
||||
)
|
||||
try:
|
||||
response = yield self.get_json(uri, fields)
|
||||
if not isinstance(response, list):
|
||||
logger.warning(
|
||||
"query_3pe to %s returned an invalid response %r",
|
||||
uri, response
|
||||
)
|
||||
defer.returnValue([])
|
||||
|
||||
ret = []
|
||||
for r in response:
|
||||
if _is_valid_3pe_result(r, field=required_field):
|
||||
ret.append(r)
|
||||
else:
|
||||
logger.warning(
|
||||
"query_3pe to %s returned an invalid result %r",
|
||||
uri, r
|
||||
)
|
||||
|
||||
defer.returnValue(ret)
|
||||
except Exception as ex:
|
||||
logger.warning("query_3pe to %s threw exception %s", uri, ex)
|
||||
defer.returnValue([])
|
||||
|
||||
def get_3pe_protocol(self, service, protocol):
|
||||
if service.url is None:
|
||||
defer.returnValue({})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get():
|
||||
uri = "%s%s/thirdparty/protocol/%s" % (
|
||||
service.url,
|
||||
APP_SERVICE_PREFIX,
|
||||
urllib.quote(protocol)
|
||||
)
|
||||
try:
|
||||
info = yield self.get_json(uri, {})
|
||||
|
||||
if not _is_valid_3pe_metadata(info):
|
||||
logger.warning("query_3pe_protocol to %s did not return a"
|
||||
" valid result", uri)
|
||||
defer.returnValue(None)
|
||||
|
||||
for instance in info.get("instances", []):
|
||||
network_id = instance.get("network_id", None)
|
||||
if network_id is not None:
|
||||
instance["instance_id"] = ThirdPartyInstanceID(
|
||||
service.id, network_id,
|
||||
).to_string()
|
||||
|
||||
defer.returnValue(info)
|
||||
except Exception as ex:
|
||||
logger.warning("query_3pe_protocol to %s threw exception %s",
|
||||
uri, ex)
|
||||
defer.returnValue(None)
|
||||
|
||||
key = (service.id, protocol)
|
||||
return self.protocol_meta_cache.get(key) or (
|
||||
self.protocol_meta_cache.set(key, _get())
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def push_bulk(self, service, events, txn_id=None):
|
||||
if service.url is None:
|
||||
defer.returnValue(True)
|
||||
|
||||
events = self._serialize(events)
|
||||
|
||||
if txn_id is None:
|
||||
|
||||
@@ -48,32 +48,35 @@ UP & quit +---------- YES SUCCESS
|
||||
This is all tied together by the AppServiceScheduler which DIs the required
|
||||
components.
|
||||
"""
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.appservice import ApplicationServiceState
|
||||
from twisted.internet import defer
|
||||
from synapse.util.logcontext import preserve_fn
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AppServiceScheduler(object):
|
||||
class ApplicationServiceScheduler(object):
|
||||
""" Public facing API for this module. Does the required DI to tie the
|
||||
components together. This also serves as the "event_pool", which in this
|
||||
case is a simple array.
|
||||
"""
|
||||
|
||||
def __init__(self, clock, store, as_api):
|
||||
self.clock = clock
|
||||
self.store = store
|
||||
self.as_api = as_api
|
||||
def __init__(self, hs):
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastore()
|
||||
self.as_api = hs.get_application_service_api()
|
||||
|
||||
def create_recoverer(service, callback):
|
||||
return _Recoverer(clock, store, as_api, service, callback)
|
||||
return _Recoverer(self.clock, self.store, self.as_api, service, callback)
|
||||
|
||||
self.txn_ctrl = _TransactionController(
|
||||
clock, store, as_api, create_recoverer
|
||||
self.clock, self.store, self.as_api, create_recoverer
|
||||
)
|
||||
self.queuer = _ServiceQueuer(self.txn_ctrl)
|
||||
self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def start(self):
|
||||
@@ -94,38 +97,36 @@ class _ServiceQueuer(object):
|
||||
this schedules any other events in the queue to run.
|
||||
"""
|
||||
|
||||
def __init__(self, txn_ctrl):
|
||||
def __init__(self, txn_ctrl, clock):
|
||||
self.queued_events = {} # dict of {service_id: [events]}
|
||||
self.pending_requests = {} # dict of {service_id: Deferred}
|
||||
self.requests_in_flight = set()
|
||||
self.txn_ctrl = txn_ctrl
|
||||
self.clock = clock
|
||||
|
||||
def enqueue(self, service, event):
|
||||
# if this service isn't being sent something
|
||||
if not self.pending_requests.get(service.id):
|
||||
self._send_request(service, [event])
|
||||
else:
|
||||
# add to queue for this service
|
||||
if service.id not in self.queued_events:
|
||||
self.queued_events[service.id] = []
|
||||
self.queued_events[service.id].append(event)
|
||||
self.queued_events.setdefault(service.id, []).append(event)
|
||||
preserve_fn(self._send_request)(service)
|
||||
|
||||
def _send_request(self, service, events):
|
||||
# send request and add callbacks
|
||||
d = self.txn_ctrl.send(service, events)
|
||||
d.addBoth(self._on_request_finish)
|
||||
d.addErrback(self._on_request_fail)
|
||||
self.pending_requests[service.id] = d
|
||||
@defer.inlineCallbacks
|
||||
def _send_request(self, service):
|
||||
if service.id in self.requests_in_flight:
|
||||
return
|
||||
|
||||
def _on_request_finish(self, service):
|
||||
self.pending_requests[service.id] = None
|
||||
# if there are queued events, then send them.
|
||||
if (service.id in self.queued_events
|
||||
and len(self.queued_events[service.id]) > 0):
|
||||
self._send_request(service, self.queued_events[service.id])
|
||||
self.queued_events[service.id] = []
|
||||
self.requests_in_flight.add(service.id)
|
||||
try:
|
||||
while True:
|
||||
events = self.queued_events.pop(service.id, [])
|
||||
if not events:
|
||||
return
|
||||
|
||||
def _on_request_fail(self, err):
|
||||
logger.error("AS request failed: %s", err)
|
||||
with Measure(self.clock, "servicequeuer.send"):
|
||||
try:
|
||||
yield self.txn_ctrl.send(service, events)
|
||||
except:
|
||||
logger.exception("AS request failed")
|
||||
finally:
|
||||
self.requests_in_flight.discard(service.id)
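# Illustrative sketch only (plain synchronous Python, no Twisted): the queuing
# discipline _ServiceQueuer uses above -- at most one request in flight per
# application service, draining whatever accumulated in the queue before the
# "in flight" marker is released. All names here are invented.
class DemoQueuer(object):
    def __init__(self, send):
        self.queued_events = {}         # service_id -> [events]
        self.requests_in_flight = set()
        self.send = send                # callable(service_id, events)

    def enqueue(self, service_id, event):
        self.queued_events.setdefault(service_id, []).append(event)
        self._send_request(service_id)

    def _send_request(self, service_id):
        if service_id in self.requests_in_flight:
            return  # an earlier call is still draining this service's queue
        self.requests_in_flight.add(service_id)
        try:
            while True:
                events = self.queued_events.pop(service_id, [])
                if not events:
                    return
                self.send(service_id, events)
        finally:
            self.requests_in_flight.discard(service_id)

sent = []
queuer = DemoQueuer(lambda sid, evs: sent.append((sid, evs)))
queuer.enqueue("as1", "event1")
queuer.enqueue("as1", "event2")
assert sent == [("as1", ["event1"]), ("as1", ["event2"])]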
|
||||
|
||||
|
||||
class _TransactionController(object):
|
||||
@@ -149,14 +150,12 @@ class _TransactionController(object):
|
||||
if service_is_up:
|
||||
sent = yield txn.send(self.as_api)
|
||||
if sent:
|
||||
txn.complete(self.store)
|
||||
yield txn.complete(self.store)
|
||||
else:
|
||||
self._start_recoverer(service)
|
||||
preserve_fn(self._start_recoverer)(service)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
self._start_recoverer(service)
|
||||
# request has finished
|
||||
defer.returnValue(service)
|
||||
preserve_fn(self._start_recoverer)(service)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_recovered(self, recoverer):
|
||||
|
||||
@@ -64,11 +64,12 @@ class Config(object):
|
||||
if isinstance(value, int) or isinstance(value, long):
|
||||
return value
|
||||
second = 1000
|
||||
hour = 60 * 60 * second
|
||||
minute = 60 * second
|
||||
hour = 60 * minute
|
||||
day = 24 * hour
|
||||
week = 7 * day
|
||||
year = 365 * day
|
||||
sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
|
||||
sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
|
||||
size = 1
|
||||
suffix = value[-1]
|
||||
if suffix in sizes:
|
||||
@@ -157,9 +158,40 @@ class Config(object):
|
||||
return default_config, config
|
||||
|
||||
@classmethod
|
||||
def load_config(cls, description, argv, generate_section=None):
|
||||
obj = cls()
|
||||
def load_config(cls, description, argv):
|
||||
config_parser = argparse.ArgumentParser(
|
||||
description=description,
|
||||
)
|
||||
config_parser.add_argument(
|
||||
"-c", "--config-path",
|
||||
action="append",
|
||||
metavar="CONFIG_FILE",
|
||||
help="Specify config file. Can be given multiple times and"
|
||||
" may specify directories containing *.yaml files."
|
||||
)
|
||||
|
||||
config_parser.add_argument(
|
||||
"--keys-directory",
|
||||
metavar="DIRECTORY",
|
||||
help="Where files such as certs and signing keys are stored when"
|
||||
" their location is given explicitly in the config."
|
||||
" Defaults to the directory containing the last config file",
|
||||
)
|
||||
|
||||
config_args = config_parser.parse_args(argv)
|
||||
|
||||
config_files = find_config_files(search_paths=config_args.config_path)
|
||||
|
||||
obj = cls()
|
||||
obj.read_config_files(
|
||||
config_files,
|
||||
keys_directory=config_args.keys_directory,
|
||||
generate_keys=False,
|
||||
)
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
def load_or_generate_config(cls, description, argv):
|
||||
config_parser = argparse.ArgumentParser(add_help=False)
|
||||
config_parser.add_argument(
|
||||
"-c", "--config-path",
|
||||
@@ -176,7 +208,7 @@ class Config(object):
|
||||
config_parser.add_argument(
|
||||
"--report-stats",
|
||||
action="store",
|
||||
help="Stuff",
|
||||
help="Whether the generated config reports anonymized usage statistics",
|
||||
choices=["yes", "no"]
|
||||
)
|
||||
config_parser.add_argument(
|
||||
@@ -197,36 +229,11 @@ class Config(object):
|
||||
)
|
||||
config_args, remaining_args = config_parser.parse_known_args(argv)
|
||||
|
||||
config_files = find_config_files(search_paths=config_args.config_path)
|
||||
|
||||
generate_keys = config_args.generate_keys
|
||||
|
||||
config_files = []
|
||||
if config_args.config_path:
|
||||
for config_path in config_args.config_path:
|
||||
if os.path.isdir(config_path):
|
||||
# We accept specifying directories as config paths, we search
|
||||
# inside that directory for all files matching *.yaml, and then
|
||||
# we apply them in *sorted* order.
|
||||
files = []
|
||||
for entry in os.listdir(config_path):
|
||||
entry_path = os.path.join(config_path, entry)
|
||||
if not os.path.isfile(entry_path):
|
||||
print (
|
||||
"Found subdirectory in config directory: %r. IGNORING."
|
||||
) % (entry_path, )
|
||||
continue
|
||||
|
||||
if not entry.endswith(".yaml"):
|
||||
print (
|
||||
"Found file in config directory that does not"
|
||||
" end in '.yaml': %r. IGNORING."
|
||||
) % (entry_path, )
|
||||
continue
|
||||
|
||||
files.append(entry_path)
|
||||
|
||||
config_files.extend(sorted(files))
|
||||
else:
|
||||
config_files.append(config_path)
|
||||
obj = cls()
|
||||
|
||||
if config_args.generate_config:
|
||||
if config_args.report_stats is None:
|
||||
@@ -299,28 +306,43 @@ class Config(object):
|
||||
" -c CONFIG-FILE\""
|
||||
)
|
||||
|
||||
if config_args.keys_directory:
|
||||
config_dir_path = config_args.keys_directory
|
||||
else:
|
||||
config_dir_path = os.path.dirname(config_args.config_path[-1])
|
||||
config_dir_path = os.path.abspath(config_dir_path)
|
||||
obj.read_config_files(
|
||||
config_files,
|
||||
keys_directory=config_args.keys_directory,
|
||||
generate_keys=generate_keys,
|
||||
)
|
||||
|
||||
if generate_keys:
|
||||
return None
|
||||
|
||||
obj.invoke_all("read_arguments", args)
|
||||
|
||||
return obj
|
||||
|
||||
def read_config_files(self, config_files, keys_directory=None,
|
||||
generate_keys=False):
|
||||
if not keys_directory:
|
||||
keys_directory = os.path.dirname(config_files[-1])
|
||||
|
||||
config_dir_path = os.path.abspath(keys_directory)
|
||||
|
||||
specified_config = {}
|
||||
for config_file in config_files:
|
||||
yaml_config = cls.read_config_file(config_file)
|
||||
yaml_config = self.read_config_file(config_file)
|
||||
specified_config.update(yaml_config)
|
||||
|
||||
if "server_name" not in specified_config:
|
||||
raise ConfigError(MISSING_SERVER_NAME)
|
||||
|
||||
server_name = specified_config["server_name"]
|
||||
_, config = obj.generate_config(
|
||||
_, config = self.generate_config(
|
||||
config_dir_path=config_dir_path,
|
||||
server_name=server_name,
|
||||
is_generating_file=False,
|
||||
)
|
||||
config.pop("log_config")
|
||||
config.update(specified_config)
|
||||
|
||||
if "report_stats" not in config:
|
||||
raise ConfigError(
|
||||
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
|
||||
@@ -328,11 +350,51 @@ class Config(object):
|
||||
)
|
||||
|
||||
if generate_keys:
|
||||
obj.invoke_all("generate_files", config)
|
||||
self.invoke_all("generate_files", config)
|
||||
return
|
||||
|
||||
obj.invoke_all("read_config", config)
|
||||
self.invoke_all("read_config", config)
|
||||
|
||||
obj.invoke_all("read_arguments", args)
|
||||
|
||||
return obj
|
||||
def find_config_files(search_paths):
|
||||
"""Finds config files using a list of search paths. If a path is a file
|
||||
then that file path is added to the list. If a search path is a directory
|
||||
then all the "*.yaml" files in that directory are added to the list in
|
||||
sorted order.
|
||||
|
||||
Args:
|
||||
search_paths(list(str)): A list of paths to search.
|
||||
|
||||
Returns:
|
||||
list(str): A list of file paths.
|
||||
"""
|
||||
|
||||
config_files = []
|
||||
if search_paths:
|
||||
for config_path in search_paths:
|
||||
if os.path.isdir(config_path):
|
||||
# We accept specifying directories as config paths, we search
|
||||
# inside that directory for all files matching *.yaml, and then
|
||||
# we apply them in *sorted* order.
|
||||
files = []
|
||||
for entry in os.listdir(config_path):
|
||||
entry_path = os.path.join(config_path, entry)
|
||||
if not os.path.isfile(entry_path):
|
||||
print (
|
||||
"Found subdirectory in config directory: %r. IGNORING."
|
||||
) % (entry_path, )
|
||||
continue
|
||||
|
||||
if not entry.endswith(".yaml"):
|
||||
print (
|
||||
"Found file in config directory that does not"
|
||||
" end in '.yaml': %r. IGNORING."
|
||||
) % (entry_path, )
|
||||
continue
|
||||
|
||||
files.append(entry_path)
|
||||
|
||||
config_files.extend(sorted(files))
|
||||
else:
|
||||
config_files.append(config_path)
|
||||
return config_files
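# Illustrative sketch only: how find_config_files() above resolves a mixed
# list of files and directories. Paths are invented; assumes the function is
# available in the current module.
import os
import tempfile

confdir = tempfile.mkdtemp()
for name in ("b.yaml", "a.yaml", "notes.txt"):
    open(os.path.join(confdir, name), "w").close()

found = find_config_files(["homeserver.yaml", confdir])

# Plain file paths pass through unchanged; directories contribute their
# *.yaml files in sorted order (notes.txt is skipped with a warning).
assert found[0] == "homeserver.yaml"
assert [os.path.basename(p) for p in found[1:]] == ["a.yaml", "b.yaml"]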
|
||||
|
||||
@@ -12,16 +12,153 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import Config
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
from synapse.appservice import ApplicationService
|
||||
from synapse.types import UserID
|
||||
|
||||
import urllib
|
||||
import yaml
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AppServiceConfig(Config):
|
||||
|
||||
def read_config(self, config):
|
||||
self.app_service_config_files = config.get("app_service_config_files", [])
|
||||
self.notify_appservices = config.get("notify_appservices", True)
|
||||
|
||||
def default_config(cls, **kwargs):
|
||||
return """\
|
||||
# A list of application service config files to use
|
||||
app_service_config_files: []
|
||||
"""
|
||||
|
||||
|
||||
def load_appservices(hostname, config_files):
|
||||
"""Returns a list of Application Services from the config files."""
|
||||
if not isinstance(config_files, list):
|
||||
logger.warning(
|
||||
"Expected %s to be a list of AS config files.", config_files
|
||||
)
|
||||
return []
|
||||
|
||||
# Dicts of value -> filename
|
||||
seen_as_tokens = {}
|
||||
seen_ids = {}
|
||||
|
||||
appservices = []
|
||||
|
||||
for config_file in config_files:
|
||||
try:
|
||||
with open(config_file, 'r') as f:
|
||||
appservice = _load_appservice(
|
||||
hostname, yaml.load(f), config_file
|
||||
)
|
||||
if appservice.id in seen_ids:
|
||||
raise ConfigError(
|
||||
"Cannot reuse ID across application services: "
|
||||
"%s (files: %s, %s)" % (
|
||||
appservice.id, config_file, seen_ids[appservice.id],
|
||||
)
|
||||
)
|
||||
seen_ids[appservice.id] = config_file
|
||||
if appservice.token in seen_as_tokens:
|
||||
raise ConfigError(
|
||||
"Cannot reuse as_token across application services: "
|
||||
"%s (files: %s, %s)" % (
|
||||
appservice.token,
|
||||
config_file,
|
||||
seen_as_tokens[appservice.token],
|
||||
)
|
||||
)
|
||||
seen_as_tokens[appservice.token] = config_file
|
||||
logger.info("Loaded application service: %s", appservice)
|
||||
appservices.append(appservice)
|
||||
except Exception as e:
|
||||
logger.error("Failed to load appservice from '%s'", config_file)
|
||||
logger.exception(e)
|
||||
raise
|
||||
return appservices
|
||||
|
||||
|
||||
def _load_appservice(hostname, as_info, config_filename):
|
||||
required_string_fields = [
|
||||
"id", "as_token", "hs_token", "sender_localpart"
|
||||
]
|
||||
for field in required_string_fields:
|
||||
if not isinstance(as_info.get(field), basestring):
|
||||
raise KeyError("Required string field: '%s' (%s)" % (
|
||||
field, config_filename,
|
||||
))
|
||||
|
||||
# 'url' must either be a string or explicitly null, not missing
|
||||
# to avoid accidentally turning off push for ASes.
|
||||
if (not isinstance(as_info.get("url"), basestring) and
|
||||
as_info.get("url", "") is not None):
|
||||
raise KeyError(
|
||||
"Required string field or explicit null: 'url' (%s)" % (config_filename,)
|
||||
)
|
||||
|
||||
localpart = as_info["sender_localpart"]
|
||||
if urllib.quote(localpart) != localpart:
|
||||
raise ValueError(
|
||||
"sender_localpart needs characters which are not URL encoded."
|
||||
)
|
||||
user = UserID(localpart, hostname)
|
||||
user_id = user.to_string()
|
||||
|
||||
# Rate limiting for users of this AS is on by default (excludes sender)
|
||||
rate_limited = True
|
||||
if isinstance(as_info.get("rate_limited"), bool):
|
||||
rate_limited = as_info.get("rate_limited")
|
||||
|
||||
# namespace checks
|
||||
if not isinstance(as_info.get("namespaces"), dict):
|
||||
raise KeyError("Requires 'namespaces' object.")
|
||||
for ns in ApplicationService.NS_LIST:
|
||||
# specific namespaces are optional
|
||||
if ns in as_info["namespaces"]:
|
||||
# expect a list of dicts with exclusive and regex keys
|
||||
for regex_obj in as_info["namespaces"][ns]:
|
||||
if not isinstance(regex_obj, dict):
|
||||
raise ValueError(
|
||||
"Expected namespace entry in %s to be an object,"
|
||||
" but got %s", ns, regex_obj
|
||||
)
|
||||
if not isinstance(regex_obj.get("regex"), basestring):
|
||||
raise ValueError(
|
||||
"Missing/bad type 'regex' key in %s", regex_obj
|
||||
)
|
||||
if not isinstance(regex_obj.get("exclusive"), bool):
|
||||
raise ValueError(
|
||||
"Missing/bad type 'exclusive' key in %s", regex_obj
|
||||
)
|
||||
# protocols check
|
||||
protocols = as_info.get("protocols")
|
||||
if protocols:
|
||||
# Because strings are lists in python
|
||||
if isinstance(protocols, str) or not isinstance(protocols, list):
|
||||
raise KeyError("Optional 'protocols' must be a list if present.")
|
||||
for p in protocols:
|
||||
if not isinstance(p, str):
|
||||
raise KeyError("Bad value for 'protocols' item")
|
||||
|
||||
if as_info["url"] is None:
|
||||
logger.info(
|
||||
"(%s) Explicitly empty 'url' provided. This application service"
|
||||
" will not receive events or queries.",
|
||||
config_filename,
|
||||
)
|
||||
return ApplicationService(
|
||||
token=as_info["as_token"],
|
||||
url=as_info["url"],
|
||||
namespaces=as_info["namespaces"],
|
||||
hs_token=as_info["hs_token"],
|
||||
sender=user_id,
|
||||
id=as_info["id"],
|
||||
protocols=protocols,
|
||||
rate_limited=rate_limited
|
||||
)
|
||||
|
||||
@@ -27,6 +27,7 @@ class CaptchaConfig(Config):
|
||||
def default_config(self, **kwargs):
|
||||
return """\
|
||||
## Captcha ##
|
||||
# See docs/CAPTCHA_SETUP for full details of configuring this.
|
||||
|
||||
# This Home Server's ReCAPTCHA public key.
|
||||
recaptcha_public_key: "YOUR_PUBLIC_KEY"
|
||||
|
||||
synapse/config/emailconfig.py (new file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This file can't be called email.py because if it is, we cannot:
|
||||
import email.utils
|
||||
|
||||
from ._base import Config
|
||||
|
||||
|
||||
class EmailConfig(Config):
|
||||
def read_config(self, config):
|
||||
self.email_enable_notifs = False
|
||||
|
||||
email_config = config.get("email", {})
|
||||
self.email_enable_notifs = email_config.get("enable_notifs", False)
|
||||
|
||||
if self.email_enable_notifs:
|
||||
# make sure we can import the required deps
|
||||
import jinja2
|
||||
import bleach
|
||||
# prevent unused warnings
|
||||
jinja2
|
||||
bleach
|
||||
|
||||
required = [
|
||||
"smtp_host",
|
||||
"smtp_port",
|
||||
"notif_from",
|
||||
"template_dir",
|
||||
"notif_template_html",
|
||||
"notif_template_text",
|
||||
]
|
||||
|
||||
missing = []
|
||||
for k in required:
|
||||
if k not in email_config:
|
||||
missing.append(k)
|
||||
|
||||
if (len(missing) > 0):
|
||||
raise RuntimeError(
|
||||
"email.enable_notifs is True but required keys are missing: %s" %
|
||||
(", ".join(["email." + k for k in missing]),)
|
||||
)
|
||||
|
||||
if config.get("public_baseurl") is None:
|
||||
raise RuntimeError(
|
||||
"email.enable_notifs is True but no public_baseurl is set"
|
||||
)
|
||||
|
||||
self.email_smtp_host = email_config["smtp_host"]
|
||||
self.email_smtp_port = email_config["smtp_port"]
|
||||
self.email_notif_from = email_config["notif_from"]
|
||||
self.email_template_dir = email_config["template_dir"]
|
||||
self.email_notif_template_html = email_config["notif_template_html"]
|
||||
self.email_notif_template_text = email_config["notif_template_text"]
|
||||
self.email_notif_for_new_users = email_config.get(
|
||||
"notif_for_new_users", True
|
||||
)
|
||||
self.email_riot_base_url = email_config.get(
|
||||
"riot_base_url", None
|
||||
)
|
||||
if "app_name" in email_config:
|
||||
self.email_app_name = email_config["app_name"]
|
||||
else:
|
||||
self.email_app_name = "Matrix"
|
||||
|
||||
# make sure it's valid
|
||||
parsed = email.utils.parseaddr(self.email_notif_from)
|
||||
if parsed[1] == '':
|
||||
raise RuntimeError("Invalid notif_from address")
|
||||
else:
|
||||
self.email_enable_notifs = False
|
||||
# Not much point setting defaults for the rest: it would be an
|
||||
# error for them to be used.
|
||||
|
||||
def default_config(self, config_dir_path, server_name, **kwargs):
|
||||
return """
|
||||
# Enable sending emails for notification events
|
||||
# Defining a custom URL for Riot is only needed if email notifications
|
||||
# should contain links to a self-hosted installation of Riot; when set
|
||||
# the "app_name" setting is ignored.
|
||||
#email:
|
||||
# enable_notifs: false
|
||||
# smtp_host: "localhost"
|
||||
# smtp_port: 25
|
||||
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
|
||||
# app_name: Matrix
|
||||
# template_dir: res/templates
|
||||
# notif_template_html: notif_mail.html
|
||||
# notif_template_text: notif_mail.txt
|
||||
# notif_for_new_users: True
|
||||
# riot_base_url: "http://localhost/riot"
|
||||
"""
|
||||
@@ -30,14 +30,17 @@ from .saml2 import SAML2Config
|
||||
from .cas import CasConfig
|
||||
from .password import PasswordConfig
|
||||
from .jwt import JWTConfig
|
||||
from .ldap import LDAPConfig
|
||||
from .password_auth_providers import PasswordAuthProviderConfig
|
||||
from .emailconfig import EmailConfig
|
||||
from .workers import WorkerConfig
|
||||
|
||||
|
||||
class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
|
||||
RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
|
||||
VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
|
||||
AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
|
||||
JWTConfig, LDAPConfig, PasswordConfig,):
|
||||
JWTConfig, PasswordConfig, EmailConfig,
|
||||
WorkerConfig, PasswordAuthProviderConfig,):
|
||||
pass
|
||||
|
||||
|
||||
|
||||
@@ -13,7 +13,16 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import Config
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
|
||||
MISSING_JWT = (
|
||||
"""Missing jwt library. This is required for jwt login.
|
||||
|
||||
Install by running:
|
||||
pip install pyjwt
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
class JWTConfig(Config):
|
||||
@@ -23,6 +32,12 @@ class JWTConfig(Config):
|
||||
self.jwt_enabled = jwt_config.get("enabled", False)
|
||||
self.jwt_secret = jwt_config["secret"]
|
||||
self.jwt_algorithm = jwt_config["algorithm"]
|
||||
|
||||
try:
|
||||
import jwt
|
||||
jwt # To stop unused lint.
|
||||
except ImportError:
|
||||
raise ConfigError(MISSING_JWT)
|
||||
else:
|
||||
self.jwt_enabled = False
|
||||
self.jwt_secret = None
|
||||
@@ -30,6 +45,8 @@ class JWTConfig(Config):
|
||||
|
||||
def default_config(self, **kwargs):
|
||||
return """\
|
||||
# The JWT needs to contain a globally unique "sub" (subject) claim.
|
||||
#
|
||||
# jwt_config:
|
||||
# enabled: true
|
||||
# secret: "a secret"
|
||||
|
||||
@@ -57,6 +57,8 @@ class KeyConfig(Config):
|
||||
seed = self.signing_key[0].seed
|
||||
self.macaroon_secret_key = hashlib.sha256(seed)
|
||||
|
||||
self.expire_access_token = config.get("expire_access_token", False)
|
||||
|
||||
def default_config(self, config_dir_path, server_name, is_generating_file=False,
|
||||
**kwargs):
|
||||
base_key_name = os.path.join(config_dir_path, server_name)
|
||||
@@ -69,6 +71,9 @@ class KeyConfig(Config):
|
||||
return """\
|
||||
macaroon_secret_key: "%(macaroon_secret_key)s"
|
||||
|
||||
# Used to enable access token expiration.
|
||||
expire_access_token: False
|
||||
|
||||
## Signing Keys ##
|
||||
|
||||
# Path to the signing key to sign messages with
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 Niklas Riekenbrauck
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import Config
|
||||
|
||||
|
||||
class LDAPConfig(Config):
|
||||
def read_config(self, config):
|
||||
ldap_config = config.get("ldap_config", None)
|
||||
if ldap_config:
|
||||
self.ldap_enabled = ldap_config.get("enabled", False)
|
||||
self.ldap_server = ldap_config["server"]
|
||||
self.ldap_port = ldap_config["port"]
|
||||
self.ldap_tls = ldap_config.get("tls", False)
|
||||
self.ldap_search_base = ldap_config["search_base"]
|
||||
self.ldap_search_property = ldap_config["search_property"]
|
||||
self.ldap_email_property = ldap_config["email_property"]
|
||||
self.ldap_full_name_property = ldap_config["full_name_property"]
|
||||
else:
|
||||
self.ldap_enabled = False
|
||||
self.ldap_server = None
|
||||
self.ldap_port = None
|
||||
self.ldap_tls = False
|
||||
self.ldap_search_base = None
|
||||
self.ldap_search_property = None
|
||||
self.ldap_email_property = None
|
||||
self.ldap_full_name_property = None
|
||||
|
||||
def default_config(self, **kwargs):
|
||||
return """\
|
||||
# ldap_config:
|
||||
# enabled: true
|
||||
# server: "ldap://localhost"
|
||||
# port: 389
|
||||
# tls: false
|
||||
# search_base: "ou=Users,dc=example,dc=com"
|
||||
# search_property: "cn"
|
||||
# email_property: "email"
|
||||
# full_name_property: "givenName"
|
||||
"""
|
||||
@@ -15,14 +15,13 @@
|
||||
|
||||
from ._base import Config
|
||||
from synapse.util.logcontext import LoggingContextFilter
|
||||
from twisted.python.log import PythonLoggingObserver
|
||||
from twisted.logger import globalLogBeginner, STDLibLogObserver
|
||||
import logging
|
||||
import logging.config
|
||||
import yaml
|
||||
from string import Template
|
||||
import os
|
||||
import signal
|
||||
from synapse.util.debug import debug_deferreds
|
||||
|
||||
|
||||
DEFAULT_LOG_CONFIG = Template("""
|
||||
@@ -46,16 +45,18 @@ handlers:
|
||||
maxBytes: 104857600
|
||||
backupCount: 10
|
||||
filters: [context]
|
||||
level: INFO
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: precise
|
||||
filters: [context]
|
||||
|
||||
loggers:
|
||||
synapse:
|
||||
level: INFO
|
||||
|
||||
synapse.storage.SQL:
|
||||
# beware: increasing this to DEBUG will make synapse log sensitive
|
||||
# information such as access tokens.
|
||||
level: INFO
|
||||
|
||||
root:
|
||||
@@ -68,10 +69,9 @@ class LoggingConfig(Config):
|
||||
|
||||
def read_config(self, config):
|
||||
self.verbosity = config.get("verbose", 0)
|
||||
self.no_redirect_stdio = config.get("no_redirect_stdio", False)
|
||||
self.log_config = self.abspath(config.get("log_config"))
|
||||
self.log_file = self.abspath(config.get("log_file"))
|
||||
if config.get("full_twisted_stacktraces"):
|
||||
debug_deferreds()
|
||||
|
||||
def default_config(self, config_dir_path, server_name, **kwargs):
|
||||
log_file = self.abspath("homeserver.log")
|
||||
@@ -79,24 +79,21 @@ class LoggingConfig(Config):
|
||||
os.path.join(config_dir_path, server_name + ".log.config")
|
||||
)
|
||||
return """
|
||||
# Logging verbosity level.
|
||||
# Logging verbosity level. Ignored if log_config is specified.
|
||||
verbose: 0
|
||||
|
||||
# File to write logging to
|
||||
# File to write logging to. Ignored if log_config is specified.
|
||||
log_file: "%(log_file)s"
|
||||
|
||||
# A yaml python logging config file
|
||||
log_config: "%(log_config)s"
|
||||
|
||||
# Stop twisted from discarding the stack traces of exceptions in
|
||||
# deferreds by waiting a reactor tick before running a deferred's
|
||||
# callbacks.
|
||||
# full_twisted_stacktraces: true
|
||||
""" % locals()
|
||||
|
||||
def read_arguments(self, args):
|
||||
if args.verbose is not None:
|
||||
self.verbosity = args.verbose
|
||||
if args.no_redirect_stdio is not None:
|
||||
self.no_redirect_stdio = args.no_redirect_stdio
|
||||
if args.log_config is not None:
|
||||
self.log_config = args.log_config
|
||||
if args.log_file is not None:
|
||||
@@ -106,16 +103,22 @@ class LoggingConfig(Config):
|
||||
logging_group = parser.add_argument_group("logging")
|
||||
logging_group.add_argument(
|
||||
'-v', '--verbose', dest="verbose", action='count',
|
||||
help="The verbosity level."
|
||||
help="The verbosity level. Specify multiple times to increase "
|
||||
"verbosity. (Ignored if --log-config is specified.)"
|
||||
)
|
||||
logging_group.add_argument(
|
||||
'-f', '--log-file', dest="log_file",
|
||||
help="File to log to."
|
||||
help="File to log to. (Ignored if --log-config is specified.)"
|
||||
)
|
||||
logging_group.add_argument(
|
||||
'--log-config', dest="log_config", default=None,
|
||||
help="Python logging config file"
|
||||
)
|
||||
logging_group.add_argument(
|
||||
'-n', '--no-redirect-stdio',
|
||||
action='store_true', default=None,
|
||||
help="Do not redirect stdout/stderr to the log"
|
||||
)
|
||||
|
||||
def generate_files(self, config):
|
||||
log_config = config.get("log_config")
|
||||
@@ -125,55 +128,92 @@ class LoggingConfig(Config):
|
||||
DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
|
||||
)
|
||||
|
||||
def setup_logging(self):
|
||||
log_format = (
|
||||
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
|
||||
" - %(message)s"
|
||||
)
|
||||
if self.log_config is None:
|
||||
|
||||
level = logging.INFO
|
||||
level_for_storage = logging.INFO
|
||||
if self.verbosity:
|
||||
level = logging.DEBUG
|
||||
if self.verbosity > 1:
|
||||
level_for_storage = logging.DEBUG
|
||||
def setup_logging(config, use_worker_options=False):
|
||||
""" Set up python logging
|
||||
|
||||
# FIXME: we need a logging.WARN for a -q quiet option
|
||||
logger = logging.getLogger('')
|
||||
logger.setLevel(level)
|
||||
Args:
|
||||
config (LoggingConfig | synapse.config.workers.WorkerConfig):
|
||||
configuration data
|
||||
|
||||
logging.getLogger('synapse.storage').setLevel(level_for_storage)
|
||||
use_worker_options (bool): True to use 'worker_log_config' and
|
||||
'worker_log_file' options instead of 'log_config' and 'log_file'.
|
||||
"""
|
||||
log_config = (config.worker_log_config if use_worker_options
|
||||
else config.log_config)
|
||||
log_file = (config.worker_log_file if use_worker_options
|
||||
else config.log_file)
|
||||
|
||||
formatter = logging.Formatter(log_format)
|
||||
if self.log_file:
|
||||
# TODO: Customisable file size / backup count
|
||||
handler = logging.handlers.RotatingFileHandler(
|
||||
self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
|
||||
)
|
||||
log_format = (
|
||||
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
|
||||
" - %(message)s"
|
||||
)
|
||||
if log_config is None:
|
||||
|
||||
def sighup(signum, stack):
|
||||
logger.info("Closing log file due to SIGHUP")
|
||||
handler.doRollover()
|
||||
logger.info("Opened new log file due to SIGHUP")
|
||||
level = logging.INFO
|
||||
level_for_storage = logging.INFO
|
||||
if config.verbosity:
|
||||
level = logging.DEBUG
|
||||
if config.verbosity > 1:
|
||||
level_for_storage = logging.DEBUG
|
||||
|
||||
# TODO(paul): obviously this is a terrible mechanism for
|
||||
# stealing SIGHUP, because it means no other part of synapse
|
||||
# can use it instead. If we want to catch SIGHUP anywhere
|
||||
# else as well, I'd suggest we find a nicer way to broadcast
|
||||
# it around.
|
||||
if getattr(signal, "SIGHUP"):
|
||||
signal.signal(signal.SIGHUP, sighup)
|
||||
else:
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(formatter)
|
||||
# FIXME: we need a logging.WARN for a -q quiet option
|
||||
logger = logging.getLogger('')
|
||||
logger.setLevel(level)
|
||||
|
||||
handler.addFilter(LoggingContextFilter(request=""))
|
||||
logging.getLogger('synapse.storage').setLevel(level_for_storage)
|
||||
|
||||
logger.addHandler(handler)
|
||||
formatter = logging.Formatter(log_format)
|
||||
if log_file:
|
||||
# TODO: Customisable file size / backup count
|
||||
handler = logging.handlers.RotatingFileHandler(
|
||||
log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
|
||||
)
|
||||
|
||||
def sighup(signum, stack):
|
||||
logger.info("Closing log file due to SIGHUP")
|
||||
handler.doRollover()
|
||||
logger.info("Opened new log file due to SIGHUP")
|
||||
else:
|
||||
with open(self.log_config, 'r') as f:
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(formatter)
|
||||
|
||||
handler.addFilter(LoggingContextFilter(request=""))
|
||||
|
||||
logger.addHandler(handler)
|
||||
else:
|
||||
def load_log_config():
|
||||
with open(log_config, 'r') as f:
|
||||
logging.config.dictConfig(yaml.load(f))
|
||||
|
||||
observer = PythonLoggingObserver()
|
||||
observer.start()
|
||||
def sighup(signum, stack):
|
||||
# it might be better to use a file watcher or something for this.
|
||||
logging.info("Reloading log config from %s due to SIGHUP",
|
||||
log_config)
|
||||
load_log_config()
|
||||
|
||||
load_log_config()
|
||||
|
||||
# TODO(paul): obviously this is a terrible mechanism for
|
||||
# stealing SIGHUP, because it means no other part of synapse
|
||||
# can use it instead. If we want to catch SIGHUP anywhere
|
||||
# else as well, I'd suggest we find a nicer way to broadcast
|
||||
# it around.
|
||||
if getattr(signal, "SIGHUP"):
|
||||
signal.signal(signal.SIGHUP, sighup)
|
||||
|
||||
# It's critical to point twisted's internal logging somewhere, otherwise it
|
||||
# stacks up and leaks up to 64K objects;
|
||||
# see: https://twistedmatrix.com/trac/ticket/8164
|
||||
#
|
||||
# Routing to the python logging framework could be a performance problem if
|
||||
# the handlers blocked for a long time as python.logging is a blocking API
|
||||
# see https://twistedmatrix.com/documents/current/core/howto/logger.html
|
||||
# filed as https://github.com/matrix-org/synapse/issues/1727
|
||||
#
|
||||
# However this may not be too much of a problem if we are just writing to a file.
|
||||
observer = STDLibLogObserver()
|
||||
globalLogBeginner.beginLoggingTo(
|
||||
[observer],
|
||||
redirectStandardIO=not config.no_redirect_stdio,
|
||||
)
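A self-contained sketch of the Twisted-to-stdlib log routing set up above. Here redirectStandardIO is hard-coded, whereas the hunk derives it from the no_redirect_stdio config option; this is an illustration, not Synapse's actual setup code.

import logging
import sys

from twisted.logger import STDLibLogObserver, globalLogBeginner

logging.basicConfig(stream=sys.stderr, level=logging.INFO)

# Forward every Twisted log event to the stdlib logging framework so that
# handlers, formatters and levels are controlled in one place.
observer = STDLibLogObserver()
globalLogBeginner.beginLoggingTo(
    [observer],
    redirectStandardIO=False,  # keep stdout/stderr untouched in this sketch
)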
@@ -23,10 +23,14 @@ class PasswordConfig(Config):
|
||||
def read_config(self, config):
|
||||
password_config = config.get("password_config", {})
|
||||
self.password_enabled = password_config.get("enabled", True)
|
||||
self.password_pepper = password_config.get("pepper", "")
|
||||
|
||||
def default_config(self, config_dir_path, server_name, **kwargs):
|
||||
return """
|
||||
# Enable password for login.
|
||||
password_config:
|
||||
enabled: true
|
||||
# Uncomment and change to a secret random string for extra security.
|
||||
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
|
||||
#pepper: ""
|
||||
"""
72
synapse/config/password_auth_providers.py
Normal file
@@ -0,0 +1,72 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 Openmarket
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
import importlib
|
||||
|
||||
|
||||
class PasswordAuthProviderConfig(Config):
|
||||
def read_config(self, config):
|
||||
self.password_providers = []
|
||||
|
||||
# We want to be backwards compatible with the old `ldap_config`
|
||||
# param.
|
||||
ldap_config = config.get("ldap_config", {})
|
||||
self.ldap_enabled = ldap_config.get("enabled", False)
|
||||
if self.ldap_enabled:
|
||||
from ldap_auth_provider import LdapAuthProvider
|
||||
parsed_config = LdapAuthProvider.parse_config(ldap_config)
|
||||
self.password_providers.append((LdapAuthProvider, parsed_config))
|
||||
|
||||
providers = config.get("password_providers", [])
|
||||
for provider in providers:
|
||||
# This is for backwards compat when the ldap auth provider resided
|
||||
# in this package.
|
||||
if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider":
|
||||
from ldap_auth_provider import LdapAuthProvider
|
||||
provider_class = LdapAuthProvider
|
||||
else:
|
||||
# We need to import the module, and then pick the class out of
|
||||
# that, so we split based on the last dot.
|
||||
module, clz = provider['module'].rsplit(".", 1)
|
||||
module = importlib.import_module(module)
|
||||
provider_class = getattr(module, clz)
|
||||
|
||||
try:
|
||||
provider_config = provider_class.parse_config(provider["config"])
|
||||
except Exception as e:
|
||||
raise ConfigError(
|
||||
"Failed to parse config for %r: %r" % (provider['module'], e)
|
||||
)
|
||||
self.password_providers.append((provider_class, provider_config))
|
||||
|
||||
def default_config(self, **kwargs):
|
||||
return """\
|
||||
# password_providers:
|
||||
# - module: "ldap_auth_provider.LdapAuthProvider"
|
||||
# config:
|
||||
# enabled: true
|
||||
# uri: "ldap://ldap.example.com:389"
|
||||
# start_tls: true
|
||||
# base: "ou=users,dc=example,dc=com"
|
||||
# attributes:
|
||||
# uid: "cn"
|
||||
# mail: "email"
|
||||
# name: "givenName"
|
||||
# #bind_dn:
|
||||
# #bind_password:
|
||||
# #filter: "(objectClass=posixAccount)"
|
||||
"""
@@ -13,9 +13,25 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import Config
|
||||
from ._base import Config, ConfigError
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
MISSING_NETADDR = (
|
||||
"Missing netaddr library. This is required for URL preview API."
|
||||
)
|
||||
|
||||
MISSING_LXML = (
|
||||
"""Missing lxml library. This is required for URL preview API.
|
||||
|
||||
Install by running:
|
||||
pip install lxml
|
||||
|
||||
Requires libxslt1-dev system package.
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
ThumbnailRequirement = namedtuple(
|
||||
"ThumbnailRequirement", ["width", "height", "method", "media_type"]
|
||||
)
|
||||
@@ -23,7 +39,7 @@ ThumbnailRequirement = namedtuple(
|
||||
|
||||
def parse_thumbnail_requirements(thumbnail_sizes):
|
||||
""" Takes a list of dictionaries with "width", "height", and "method" keys
|
||||
and creates a map from image media types to the thumbnail size, thumnailing
|
||||
and creates a map from image media types to the thumbnail size, thumbnailing
|
||||
method, and thumbnail media type to precalculate
|
||||
|
||||
Args:
|
||||
@@ -53,12 +69,44 @@ class ContentRepositoryConfig(Config):
|
||||
def read_config(self, config):
|
||||
self.max_upload_size = self.parse_size(config["max_upload_size"])
|
||||
self.max_image_pixels = self.parse_size(config["max_image_pixels"])
|
||||
self.max_spider_size = self.parse_size(config["max_spider_size"])
|
||||
self.media_store_path = self.ensure_directory(config["media_store_path"])
|
||||
self.uploads_path = self.ensure_directory(config["uploads_path"])
|
||||
self.dynamic_thumbnails = config["dynamic_thumbnails"]
|
||||
self.thumbnail_requirements = parse_thumbnail_requirements(
|
||||
config["thumbnail_sizes"]
|
||||
)
|
||||
self.url_preview_enabled = config.get("url_preview_enabled", False)
|
||||
if self.url_preview_enabled:
|
||||
try:
|
||||
import lxml
|
||||
lxml # To stop unused lint.
|
||||
except ImportError:
|
||||
raise ConfigError(MISSING_LXML)
|
||||
|
||||
try:
|
||||
from netaddr import IPSet
|
||||
except ImportError:
|
||||
raise ConfigError(MISSING_NETADDR)
|
||||
|
||||
if "url_preview_ip_range_blacklist" in config:
|
||||
self.url_preview_ip_range_blacklist = IPSet(
|
||||
config["url_preview_ip_range_blacklist"]
|
||||
)
|
||||
else:
|
||||
raise ConfigError(
|
||||
"For security, you must specify an explicit target IP address "
|
||||
"blacklist in url_preview_ip_range_blacklist for url previewing "
|
||||
"to work"
|
||||
)
|
||||
|
||||
self.url_preview_ip_range_whitelist = IPSet(
|
||||
config.get("url_preview_ip_range_whitelist", ())
|
||||
)
|
||||
|
||||
self.url_preview_url_blacklist = config.get(
|
||||
"url_preview_url_blacklist", ()
|
||||
)
|
||||
|
||||
def default_config(self, **kwargs):
|
||||
media_store = self.default_path("media_store")
|
||||
@@ -80,7 +128,7 @@ class ContentRepositoryConfig(Config):
|
||||
# the resolution requested by the client. If true then whenever
|
||||
# a new resolution is requested by the client the server will
|
||||
# generate a new thumbnail. If false the server will pick a thumbnail
|
||||
# from a precalcualted list.
|
||||
# from a precalculated list.
|
||||
dynamic_thumbnails: false
|
||||
|
||||
# List of thumbnail to precalculate when an image is uploaded.
|
||||
@@ -100,4 +148,73 @@ class ContentRepositoryConfig(Config):
|
||||
- width: 800
|
||||
height: 600
|
||||
method: scale
|
||||
|
||||
# Is the preview URL API enabled? If enabled, you *must* specify
|
||||
# an explicit url_preview_ip_range_blacklist of IPs that the spider is
|
||||
# denied from accessing.
|
||||
url_preview_enabled: False
|
||||
|
||||
# List of IP address CIDR ranges that the URL preview spider is denied
|
||||
# from accessing. There are no defaults: you must explicitly
|
||||
# specify a list for URL previewing to work. You should specify any
|
||||
# internal services in your network that you do not want synapse to try
|
||||
# to connect to, otherwise anyone in any Matrix room could cause your
|
||||
# synapse to issue arbitrary GET requests to your internal services,
|
||||
# causing serious security issues.
|
||||
#
|
||||
# url_preview_ip_range_blacklist:
|
||||
# - '127.0.0.0/8'
|
||||
# - '10.0.0.0/8'
|
||||
# - '172.16.0.0/12'
|
||||
# - '192.168.0.0/16'
|
||||
# - '100.64.0.0/10'
|
||||
# - '169.254.0.0/16'
|
||||
#
|
||||
# List of IP address CIDR ranges that the URL preview spider is allowed
|
||||
# to access even if they are specified in url_preview_ip_range_blacklist.
|
||||
# This is useful for specifying exceptions to wide-ranging blacklisted
|
||||
# target IP ranges - e.g. for enabling URL previews for a specific private
|
||||
# website only visible in your network.
|
||||
#
|
||||
# url_preview_ip_range_whitelist:
|
||||
# - '192.168.1.1'
|
||||
|
||||
# Optional list of URL matches that the URL preview spider is
|
||||
# denied from accessing. You should use url_preview_ip_range_blacklist
|
||||
# in preference to this, otherwise someone could define a public DNS
|
||||
# entry that points to a private IP address and circumvent the blacklist.
|
||||
# This is more useful if you know there is an entire shape of URL that
|
||||
# you know you will never want synapse to try to spider.
|
||||
#
|
||||
# Each list entry is a dictionary of url component attributes as returned
|
||||
# by urlparse.urlsplit as applied to the absolute form of the URL. See
|
||||
# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
|
||||
# The values of the dictionary are treated as a filename match pattern
|
||||
# applied to that component of URLs, unless they start with a ^ in which
|
||||
# case they are treated as a regular expression match. If all the
|
||||
# specified component matches for a given list item succeed, the URL is
|
||||
# blacklisted.
|
||||
#
|
||||
# url_preview_url_blacklist:
|
||||
# # blacklist any URL with a username in its URI
|
||||
# - username: '*'
|
||||
#
|
||||
# # blacklist all *.google.com URLs
|
||||
# - netloc: 'google.com'
|
||||
# - netloc: '*.google.com'
|
||||
#
|
||||
# # blacklist all plain HTTP URLs
|
||||
# - scheme: 'http'
|
||||
#
|
||||
# # blacklist http(s)://www.acme.com/foo
|
||||
# - netloc: 'www.acme.com'
|
||||
# path: '/foo'
|
||||
#
|
||||
# # blacklist any URL with a literal IPv4 address
|
||||
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
|
||||
|
||||
# The largest allowed URL preview spidering size in bytes
|
||||
max_spider_size: "10M"
|
||||
|
||||
|
||||
""" % locals()
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import Config
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
|
||||
class ServerConfig(Config):
|
||||
@@ -27,10 +27,32 @@ class ServerConfig(Config):
|
||||
self.daemonize = config.get("daemonize")
|
||||
self.print_pidfile = config.get("print_pidfile")
|
||||
self.user_agent_suffix = config.get("user_agent_suffix")
|
||||
self.use_frozen_dicts = config.get("use_frozen_dicts", True)
|
||||
self.use_frozen_dicts = config.get("use_frozen_dicts", False)
|
||||
self.public_baseurl = config.get("public_baseurl")
|
||||
|
||||
# Whether to send federation traffic out in this process. This only
|
||||
# applies to some federation traffic, and so shouldn't be used to
|
||||
# "disable" federation
|
||||
self.send_federation = config.get("send_federation", True)
|
||||
|
||||
if self.public_baseurl is not None:
|
||||
if self.public_baseurl[-1] != '/':
|
||||
self.public_baseurl += '/'
|
||||
self.start_pushers = config.get("start_pushers", True)
|
||||
|
||||
self.listeners = config.get("listeners", [])
|
||||
|
||||
for listener in self.listeners:
|
||||
bind_address = listener.pop("bind_address", None)
|
||||
bind_addresses = listener.setdefault("bind_addresses", [])
|
||||
|
||||
if bind_address:
|
||||
bind_addresses.append(bind_address)
|
||||
elif not bind_addresses:
|
||||
bind_addresses.append('')
|
||||
|
||||
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
|
||||
|
||||
bind_port = config.get("bind_port")
|
||||
if bind_port:
|
||||
self.listeners = []
|
||||
@@ -41,7 +63,7 @@ class ServerConfig(Config):
|
||||
|
||||
self.listeners.append({
|
||||
"port": bind_port,
|
||||
"bind_address": bind_host,
|
||||
"bind_addresses": [bind_host],
|
||||
"tls": True,
|
||||
"type": "http",
|
||||
"resources": [
|
||||
@@ -60,7 +82,7 @@ class ServerConfig(Config):
|
||||
if unsecure_port:
|
||||
self.listeners.append({
|
||||
"port": unsecure_port,
|
||||
"bind_address": bind_host,
|
||||
"bind_addresses": [bind_host],
|
||||
"tls": False,
|
||||
"type": "http",
|
||||
"resources": [
|
||||
@@ -79,7 +101,7 @@ class ServerConfig(Config):
|
||||
if manhole:
|
||||
self.listeners.append({
|
||||
"port": manhole,
|
||||
"bind_address": "127.0.0.1",
|
||||
"bind_addresses": ["127.0.0.1"],
|
||||
"type": "manhole",
|
||||
})
|
||||
|
||||
@@ -87,7 +109,7 @@ class ServerConfig(Config):
|
||||
if metrics_port:
|
||||
self.listeners.append({
|
||||
"port": metrics_port,
|
||||
"bind_address": config.get("metrics_bind_host", "127.0.0.1"),
|
||||
"bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
|
||||
"tls": False,
|
||||
"type": "http",
|
||||
"resources": [
|
||||
@@ -98,26 +120,6 @@ class ServerConfig(Config):
|
||||
]
|
||||
})
|
||||
|
||||
# Attempt to guess the content_addr for the v0 content repository
|
||||
content_addr = config.get("content_addr")
|
||||
if not content_addr:
|
||||
for listener in self.listeners:
|
||||
if listener["type"] == "http" and not listener.get("tls", False):
|
||||
unsecure_port = listener["port"]
|
||||
break
|
||||
else:
|
||||
raise RuntimeError("Could not determine 'content_addr'")
|
||||
|
||||
host = self.server_name
|
||||
if ':' not in host:
|
||||
host = "%s:%d" % (host, unsecure_port)
|
||||
else:
|
||||
host = host.split(':')[0]
|
||||
host = "%s:%d" % (host, unsecure_port)
|
||||
content_addr = "http://%s" % (host,)
|
||||
|
||||
self.content_addr = content_addr
|
||||
|
||||
def default_config(self, server_name, **kwargs):
|
||||
if ":" in server_name:
|
||||
bind_port = int(server_name.split(":")[1])
|
||||
@@ -142,11 +144,17 @@ class ServerConfig(Config):
|
||||
# Whether to serve a web client from the HTTP/HTTPS root resource.
|
||||
web_client: True
|
||||
|
||||
# The public-facing base URL for the client API (not including _matrix/...)
|
||||
# public_baseurl: https://example.com:8448/
|
||||
|
||||
# Set the soft limit on the number of file descriptors synapse can use
|
||||
# Zero is used to indicate synapse should set the soft limit to the
|
||||
# hard limit.
|
||||
soft_file_limit: 0
|
||||
|
||||
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
|
||||
# gc_thresholds: [700, 10, 10]
|
||||
|
||||
# List of ports that Synapse should listen on, their purpose and their
|
||||
# configuration.
|
||||
listeners:
|
||||
@@ -156,9 +164,14 @@ class ServerConfig(Config):
|
||||
# The port to listen for HTTPS requests on.
|
||||
port: %(bind_port)s
|
||||
|
||||
# Local interface to listen on.
|
||||
# The empty string will cause synapse to listen on all interfaces.
|
||||
bind_address: ''
|
||||
# Local addresses to listen on.
|
||||
# This will listen on all IPv4 addresses by default.
|
||||
bind_addresses:
|
||||
- '0.0.0.0'
|
||||
# Uncomment to listen on all IPv6 interfaces
|
||||
# N.B: On at least Linux this will also listen on all IPv4
|
||||
# addresses, so you will need to comment out the line above.
|
||||
# - '::'
|
||||
|
||||
# This is a 'http' listener, allows us to specify 'resources'.
|
||||
type: http
|
||||
@@ -189,7 +202,7 @@ class ServerConfig(Config):
|
||||
# For when matrix traffic passes through a load balancer that unwraps TLS.
|
||||
- port: %(unsecure_port)s
|
||||
tls: false
|
||||
bind_address: ''
|
||||
bind_addresses: ['0.0.0.0']
|
||||
type: http
|
||||
|
||||
x_forwarded: false
|
||||
@@ -228,3 +241,20 @@ class ServerConfig(Config):
|
||||
type=int,
|
||||
help="Turn on the twisted telnet manhole"
|
||||
" service on the given port.")
|
||||
|
||||
|
||||
def read_gc_thresholds(thresholds):
|
||||
"""Reads the three integer thresholds for garbage collection. Ensures that
|
||||
the thresholds are integers if thresholds are supplied.
|
||||
"""
|
||||
if thresholds is None:
|
||||
return None
|
||||
try:
|
||||
assert len(thresholds) == 3
|
||||
return (
|
||||
int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
|
||||
)
|
||||
except:
|
||||
raise ConfigError(
|
||||
"Value of `gc_threshold` must be a list of three integers if set"
|
||||
)
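The three integers validated by read_gc_thresholds are exactly what the interpreter's gc.set_threshold expects; a minimal usage sketch, with a value taken from the gc_thresholds comment in the default config:

import gc

thresholds = (700, 10, 10)  # e.g. the value suggested by the gc_thresholds comment
if thresholds is not None:
    gc.set_threshold(*thresholds)
print(gc.get_threshold())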
@@ -19,6 +19,9 @@ from OpenSSL import crypto
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
from hashlib import sha256
|
||||
from unpaddedbase64 import encode_base64
|
||||
|
||||
GENERATE_DH_PARAMS = False
|
||||
|
||||
|
||||
@@ -42,6 +45,19 @@ class TlsConfig(Config):
|
||||
config.get("tls_dh_params_path"), "tls_dh_params"
|
||||
)
|
||||
|
||||
self.tls_fingerprints = config["tls_fingerprints"]
|
||||
|
||||
# Check that our own certificate is included in the list of fingerprints
|
||||
# and include it if it is not.
|
||||
x509_certificate_bytes = crypto.dump_certificate(
|
||||
crypto.FILETYPE_ASN1,
|
||||
self.tls_certificate
|
||||
)
|
||||
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
|
||||
sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
|
||||
if sha256_fingerprint not in sha256_fingerprints:
|
||||
self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
|
||||
|
||||
# This config option applies to non-federation HTTP clients
|
||||
# (e.g. for talking to recaptcha, identity servers, and such)
|
||||
# It should never be used in production, and is intended for
|
||||
@@ -73,6 +89,28 @@ class TlsConfig(Config):
|
||||
|
||||
# Don't bind to the https port
|
||||
no_tls: False
|
||||
|
||||
# List of allowed TLS fingerprints for this server to publish along
|
||||
# with the signing keys for this server. Other matrix servers that
|
||||
# make HTTPS requests to this server will check that the TLS
|
||||
# certificates returned by this server match one of the fingerprints.
|
||||
#
|
||||
# Synapse automatically adds the fingerprint of its own certificate
|
||||
# to the list. So if federation traffic is handled directly by synapse
|
||||
# then no modification to the list is required.
|
||||
#
|
||||
# If synapse is run behind a load balancer that handles the TLS then it
|
||||
# will be necessary to add the fingerprints of the certificates used by
|
||||
# the load balancers to this list if they are different to the one
|
||||
# synapse is using.
|
||||
#
|
||||
# Homeservers are permitted to cache the list of TLS fingerprints
|
||||
# returned in the key responses up to the "valid_until_ts" returned in
|
||||
# the key response. It may be necessary to publish the fingerprints of a new
|
||||
# certificate and wait until the "valid_until_ts" of the previous key
|
||||
# responses have passed before deploying it.
|
||||
tls_fingerprints: []
|
||||
# tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
|
||||
""" % locals()
def read_tls_certificate(self, cert_path):
|
||||
|
||||
@@ -19,7 +19,9 @@ class VoipConfig(Config):
|
||||
|
||||
def read_config(self, config):
|
||||
self.turn_uris = config.get("turn_uris", [])
|
||||
self.turn_shared_secret = config["turn_shared_secret"]
|
||||
self.turn_shared_secret = config.get("turn_shared_secret")
|
||||
self.turn_username = config.get("turn_username")
|
||||
self.turn_password = config.get("turn_password")
|
||||
self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
|
||||
|
||||
def default_config(self, **kwargs):
|
||||
@@ -32,6 +34,11 @@ class VoipConfig(Config):
|
||||
# The shared secret used to compute passwords for the TURN server
|
||||
turn_shared_secret: "YOUR_SHARED_SECRET"
|
||||
|
||||
# The Username and password if the TURN server needs them and
|
||||
# does not use a token
|
||||
#turn_username: "TURNSERVER_USERNAME"
|
||||
#turn_password: "TURNSERVER_PASSWORD"
|
||||
|
||||
# How long generated TURN credentials last
|
||||
turn_user_lifetime: "1h"
|
||||
"""
|
||||
|
||||
41
synapse/config/workers.py
Normal file
@@ -0,0 +1,41 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2016 matrix.org
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ._base import Config
|
||||
|
||||
|
||||
class WorkerConfig(Config):
|
||||
"""The workers are processes run separately to the main synapse process.
|
||||
They have their own pid_file and listener configuration. They use the
|
||||
replication_url to talk to the main synapse process."""
|
||||
|
||||
def read_config(self, config):
|
||||
self.worker_app = config.get("worker_app")
|
||||
self.worker_listeners = config.get("worker_listeners")
|
||||
self.worker_daemonize = config.get("worker_daemonize")
|
||||
self.worker_pid_file = config.get("worker_pid_file")
|
||||
self.worker_log_file = config.get("worker_log_file")
|
||||
self.worker_log_config = config.get("worker_log_config")
|
||||
self.worker_replication_url = config.get("worker_replication_url")
|
||||
|
||||
if self.worker_listeners:
|
||||
for listener in self.worker_listeners:
|
||||
bind_address = listener.pop("bind_address", None)
|
||||
bind_addresses = listener.setdefault("bind_addresses", [])
|
||||
|
||||
if bind_address:
|
||||
bind_addresses.append(bind_address)
|
||||
elif not bind_addresses:
|
||||
bind_addresses.append('')
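The same bind_address to bind_addresses back-compat rule now appears in both ServerConfig and WorkerConfig; a standalone sketch of that rule:

def normalise_listener(listener):
    # Fold the legacy singular bind_address into the newer bind_addresses list;
    # a listener with neither gets '' (listen on all interfaces).
    bind_address = listener.pop("bind_address", None)
    bind_addresses = listener.setdefault("bind_addresses", [])
    if bind_address:
        bind_addresses.append(bind_address)
    elif not bind_addresses:
        bind_addresses.append('')
    return listener

print(normalise_listener({"port": 8008, "bind_address": "127.0.0.1"}))
# -> {'port': 8008, 'bind_addresses': ['127.0.0.1']}
print(normalise_listener({"port": 8008}))
# -> {'port': 8008, 'bind_addresses': ['']}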
@@ -77,10 +77,12 @@ class SynapseKeyClientProtocol(HTTPClient):
|
||||
def __init__(self):
|
||||
self.remote_key = defer.Deferred()
|
||||
self.host = None
|
||||
self._peer = None
|
||||
|
||||
def connectionMade(self):
|
||||
self.host = self.transport.getHost()
|
||||
logger.debug("Connected to %s", self.host)
|
||||
self._peer = self.transport.getPeer()
|
||||
logger.debug("Connected to %s", self._peer)
|
||||
|
||||
self.sendCommand(b"GET", self.path)
|
||||
if self.host:
|
||||
self.sendHeader(b"Host", self.host)
|
||||
@@ -124,7 +126,10 @@ class SynapseKeyClientProtocol(HTTPClient):
|
||||
self.timer.cancel()
|
||||
|
||||
def on_timeout(self):
|
||||
logger.debug("Timeout waiting for response from %s", self.host)
|
||||
logger.debug(
|
||||
"Timeout waiting for response from %s: %s",
|
||||
self.host, self._peer,
|
||||
)
|
||||
self.errback(IOError("Timeout waiting for response"))
|
||||
self.transport.abortConnection()
|
||||
|
||||
@@ -133,4 +138,5 @@ class SynapseKeyClientFactory(Factory):
|
||||
def protocol(self):
|
||||
protocol = SynapseKeyClientProtocol()
|
||||
protocol.path = self.path
|
||||
protocol.host = self.host
|
||||
return protocol
|
||||
|
||||
@@ -15,13 +15,13 @@
|
||||
|
||||
from synapse.crypto.keyclient import fetch_server_key
|
||||
from synapse.api.errors import SynapseError, Codes
|
||||
from synapse.util.retryutils import get_retry_limiter
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.util.async import ObservableDeferred
|
||||
from synapse.util.logcontext import (
|
||||
preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
|
||||
preserve_fn
|
||||
)
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
@@ -44,7 +44,25 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
|
||||
VerifyKeyRequest = namedtuple("VerifyRequest", (
|
||||
"server_name", "key_ids", "json_object", "deferred"
|
||||
))
|
||||
"""
|
||||
A request for a verify key to verify a JSON object.
|
||||
|
||||
Attributes:
|
||||
server_name(str): The name of the server to verify against.
|
||||
key_ids(set(str)): The set of key_ids that could be used to verify the
|
||||
JSON object
|
||||
json_object(dict): The JSON object to verify.
|
||||
deferred(twisted.internet.defer.Deferred):
|
||||
A deferred (server_name, key_id, verify_key) tuple that resolves when
|
||||
a verify key has been fetched
|
||||
"""
|
||||
|
||||
|
||||
class KeyLookupError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class Keyring(object):
|
||||
@@ -74,39 +92,36 @@ class Keyring(object):
|
||||
list of deferreds indicating success or failure to verify each
|
||||
json object's signature for the given server_name.
|
||||
"""
|
||||
group_id_to_json = {}
|
||||
group_id_to_group = {}
|
||||
group_ids = []
|
||||
|
||||
next_group_id = 0
|
||||
deferreds = {}
|
||||
verify_requests = []
|
||||
|
||||
for server_name, json_object in server_and_json:
|
||||
logger.debug("Verifying for %s", server_name)
|
||||
group_id = next_group_id
|
||||
next_group_id += 1
|
||||
group_ids.append(group_id)
|
||||
|
||||
key_ids = signature_ids(json_object, server_name)
|
||||
if not key_ids:
|
||||
deferreds[group_id] = defer.fail(SynapseError(
|
||||
logger.warn("Request from %s: no supported signature keys",
|
||||
server_name)
|
||||
deferred = defer.fail(SynapseError(
|
||||
400,
|
||||
"Not signed with a supported algorithm",
|
||||
Codes.UNAUTHORIZED,
|
||||
))
|
||||
else:
|
||||
deferreds[group_id] = defer.Deferred()
|
||||
deferred = defer.Deferred()
|
||||
|
||||
group = KeyGroup(server_name, group_id, key_ids)
|
||||
logger.debug("Verifying for %s with key_ids %s",
|
||||
server_name, key_ids)
|
||||
|
||||
group_id_to_group[group_id] = group
|
||||
group_id_to_json[group_id] = json_object
|
||||
verify_request = VerifyKeyRequest(
|
||||
server_name, key_ids, json_object, deferred
|
||||
)
|
||||
|
||||
verify_requests.append(verify_request)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_key_deferred(group, deferred):
|
||||
server_name = group.server_name
|
||||
def handle_key_deferred(verify_request):
|
||||
server_name = verify_request.server_name
|
||||
try:
|
||||
_, _, key_id, verify_key = yield deferred
|
||||
_, key_id, verify_key = yield verify_request.deferred
|
||||
except IOError as e:
|
||||
logger.warn(
|
||||
"Got IOError when downloading keys for %s: %s %s",
|
||||
@@ -128,8 +143,11 @@ class Keyring(object):
|
||||
Codes.UNAUTHORIZED,
|
||||
)
|
||||
|
||||
json_object = group_id_to_json[group.group_id]
|
||||
json_object = verify_request.json_object
|
||||
|
||||
logger.debug("Got key %s %s:%s for server %s, verifying" % (
|
||||
key_id, verify_key.alg, verify_key.version, server_name,
|
||||
))
|
||||
try:
|
||||
verify_signed_json(json_object, server_name, verify_key)
|
||||
except:
|
||||
@@ -157,36 +175,34 @@ class Keyring(object):
|
||||
|
||||
# Actually start fetching keys.
|
||||
wait_on_deferred.addBoth(
|
||||
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
|
||||
lambda _: self.get_server_verify_keys(verify_requests)
|
||||
)
|
||||
|
||||
# When we've finished fetching all the keys for a given server_name,
|
||||
# resolve the deferred passed to `wait_for_previous_lookups` so that
|
||||
# any lookups waiting will proceed.
|
||||
server_to_gids = {}
|
||||
server_to_request_ids = {}
|
||||
|
||||
def remove_deferreds(res, server_name, group_id):
|
||||
server_to_gids[server_name].discard(group_id)
|
||||
if not server_to_gids[server_name]:
|
||||
def remove_deferreds(res, server_name, verify_request):
|
||||
request_id = id(verify_request)
|
||||
server_to_request_ids[server_name].discard(request_id)
|
||||
if not server_to_request_ids[server_name]:
|
||||
d = server_to_deferred.pop(server_name, None)
|
||||
if d:
|
||||
d.callback(None)
|
||||
return res
|
||||
|
||||
for g_id, deferred in deferreds.items():
|
||||
server_name = group_id_to_group[g_id].server_name
|
||||
server_to_gids.setdefault(server_name, set()).add(g_id)
|
||||
deferred.addBoth(remove_deferreds, server_name, g_id)
|
||||
for verify_request in verify_requests:
|
||||
server_name = verify_request.server_name
|
||||
request_id = id(verify_request)
|
||||
server_to_request_ids.setdefault(server_name, set()).add(request_id)
|
||||
deferred.addBoth(remove_deferreds, server_name, verify_request)
|
||||
|
||||
# Pass those keys to handle_key_deferred so that the json object
|
||||
# signatures can be verified
|
||||
return [
|
||||
preserve_context_over_fn(
|
||||
handle_key_deferred,
|
||||
group_id_to_group[g_id],
|
||||
deferreds[g_id],
|
||||
)
|
||||
for g_id in group_ids
|
||||
preserve_context_over_fn(handle_key_deferred, verify_request)
|
||||
for verify_request in verify_requests
|
||||
]
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@@ -220,9 +236,15 @@ class Keyring(object):
|
||||
|
||||
d.addBoth(rm, server_name)
|
||||
|
||||
def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
|
||||
"""Takes a dict of KeyGroups and tries to find at least one key for
|
||||
each group.
|
||||
def get_server_verify_keys(self, verify_requests):
|
||||
"""Tries to find at least one key for each verify request
|
||||
|
||||
For each verify_request, verify_request.deferred is called back with
|
||||
params (server_name, key_id, VerifyKey) if a key is found, or errbacked
|
||||
with a SynapseError if none of the keys are found.
|
||||
|
||||
Args:
|
||||
verify_requests (list[VerifyKeyRequest]): list of verify requests
|
||||
"""
|
||||
|
||||
# These are functions that produce keys given a list of key ids
|
||||
@@ -234,76 +256,92 @@ class Keyring(object):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def do_iterations():
|
||||
merged_results = {}
|
||||
with Measure(self.clock, "get_server_verify_keys"):
|
||||
# dict[str, dict[str, VerifyKey]]: results so far.
|
||||
# map server_name -> key_id -> VerifyKey
|
||||
merged_results = {}
|
||||
|
||||
missing_keys = {}
|
||||
for group in group_id_to_group.values():
|
||||
missing_keys.setdefault(group.server_name, set()).update(
|
||||
group.key_ids
|
||||
)
|
||||
|
||||
for fn in key_fetch_fns:
|
||||
results = yield fn(missing_keys.items())
|
||||
merged_results.update(results)
|
||||
|
||||
# We now need to figure out which groups we have keys for
|
||||
# and which we don't
|
||||
missing_groups = {}
|
||||
for group in group_id_to_group.values():
|
||||
for key_id in group.key_ids:
|
||||
if key_id in merged_results[group.server_name]:
|
||||
with PreserveLoggingContext():
|
||||
group_id_to_deferred[group.group_id].callback((
|
||||
group.group_id,
|
||||
group.server_name,
|
||||
key_id,
|
||||
merged_results[group.server_name][key_id],
|
||||
))
|
||||
break
|
||||
else:
|
||||
missing_groups.setdefault(
|
||||
group.server_name, []
|
||||
).append(group)
|
||||
|
||||
if not missing_groups:
|
||||
break
|
||||
|
||||
missing_keys = {
|
||||
server_name: set(
|
||||
key_id for group in groups for key_id in group.key_ids
|
||||
# dict[str, set(str)]: keys to fetch for each server
|
||||
missing_keys = {}
|
||||
for verify_request in verify_requests:
|
||||
missing_keys.setdefault(verify_request.server_name, set()).update(
|
||||
verify_request.key_ids
|
||||
)
|
||||
for server_name, groups in missing_groups.items()
|
||||
}
|
||||
|
||||
for group in missing_groups.values():
|
||||
group_id_to_deferred[group.group_id].errback(SynapseError(
|
||||
401,
|
||||
"No key for %s with id %s" % (
|
||||
group.server_name, group.key_ids,
|
||||
),
|
||||
Codes.UNAUTHORIZED,
|
||||
))
|
||||
for fn in key_fetch_fns:
|
||||
results = yield fn(missing_keys.items())
|
||||
merged_results.update(results)
|
||||
|
||||
# We now need to figure out which verify requests we have keys
|
||||
# for and which we don't
|
||||
missing_keys = {}
|
||||
requests_missing_keys = []
|
||||
for verify_request in verify_requests:
|
||||
server_name = verify_request.server_name
|
||||
result_keys = merged_results[server_name]
|
||||
|
||||
if verify_request.deferred.called:
|
||||
# We've already called this deferred, which probably
|
||||
# means that we've already found a key for it.
|
||||
continue
|
||||
|
||||
for key_id in verify_request.key_ids:
|
||||
if key_id in result_keys:
|
||||
with PreserveLoggingContext():
|
||||
verify_request.deferred.callback((
|
||||
server_name,
|
||||
key_id,
|
||||
result_keys[key_id],
|
||||
))
|
||||
break
|
||||
else:
|
||||
# The else block is only reached if the loop above
|
||||
# doesn't break.
|
||||
missing_keys.setdefault(server_name, set()).update(
|
||||
verify_request.key_ids
|
||||
)
|
||||
requests_missing_keys.append(verify_request)
|
||||
|
||||
if not missing_keys:
|
||||
break
|
||||
|
||||
for verify_request in requests_missing_keys:
|
||||
verify_request.deferred.errback(SynapseError(
|
||||
401,
|
||||
"No key for %s with id %s" % (
|
||||
verify_request.server_name, verify_request.key_ids,
|
||||
),
|
||||
Codes.UNAUTHORIZED,
|
||||
))
|
||||
|
||||
def on_err(err):
|
||||
for deferred in group_id_to_deferred.values():
|
||||
if not deferred.called:
|
||||
deferred.errback(err)
|
||||
for verify_request in verify_requests:
|
||||
if not verify_request.deferred.called:
|
||||
verify_request.deferred.errback(err)
|
||||
|
||||
do_iterations().addErrback(on_err)
|
||||
|
||||
return group_id_to_deferred
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_keys_from_store(self, server_name_and_key_ids):
|
||||
res = yield defer.gatherResults(
|
||||
"""
|
||||
|
||||
Args:
|
||||
server_name_and_key_ids (list[(str, iterable[str])]):
|
||||
list of (server_name, iterable[key_id]) tuples to fetch keys for
|
||||
|
||||
Returns:
|
||||
Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
|
||||
server_name -> key_id -> VerifyKey
|
||||
"""
|
||||
res = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
self.store.get_server_verify_keys(
|
||||
preserve_fn(self.store.get_server_verify_keys)(
|
||||
server_name, key_ids
|
||||
).addCallback(lambda ks, server: (server, ks), server_name)
|
||||
for server_name, key_ids in server_name_and_key_ids
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
defer.returnValue(dict(res))
|
||||
|
||||
@@ -324,13 +362,13 @@ class Keyring(object):
|
||||
)
|
||||
defer.returnValue({})
|
||||
|
||||
results = yield defer.gatherResults(
|
||||
results = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
get_key(p_name, p_keys)
|
||||
preserve_fn(get_key)(p_name, p_keys)
|
||||
for p_name, p_keys in self.perspective_servers.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
union_of_keys = {}
|
||||
for result in results:
|
||||
@@ -343,40 +381,34 @@ class Keyring(object):
|
||||
def get_keys_from_server(self, server_name_and_key_ids):
|
||||
@defer.inlineCallbacks
|
||||
def get_key(server_name, key_ids):
|
||||
limiter = yield get_retry_limiter(
|
||||
server_name,
|
||||
self.clock,
|
||||
self.store,
|
||||
)
|
||||
with limiter:
|
||||
keys = None
|
||||
try:
|
||||
keys = yield self.get_server_verify_key_v2_direct(
|
||||
server_name, key_ids
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Unable to getting key %r for %r directly: %s %s",
|
||||
key_ids, server_name,
|
||||
type(e).__name__, str(e.message),
|
||||
)
|
||||
keys = None
|
||||
try:
|
||||
keys = yield self.get_server_verify_key_v2_direct(
|
||||
server_name, key_ids
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Unable to get key %r for %r directly: %s %s",
|
||||
key_ids, server_name,
|
||||
type(e).__name__, str(e.message),
|
||||
)
|
||||
|
||||
if not keys:
|
||||
keys = yield self.get_server_verify_key_v1_direct(
|
||||
server_name, key_ids
|
||||
)
|
||||
if not keys:
|
||||
keys = yield self.get_server_verify_key_v1_direct(
|
||||
server_name, key_ids
|
||||
)
|
||||
|
||||
keys = {server_name: keys}
|
||||
keys = {server_name: keys}
|
||||
|
||||
defer.returnValue(keys)
|
||||
|
||||
results = yield defer.gatherResults(
|
||||
results = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
get_key(server_name, key_ids)
|
||||
preserve_fn(get_key)(server_name, key_ids)
|
||||
for server_name, key_ids in server_name_and_key_ids
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
merged = {}
|
||||
for result in results:
|
||||
@@ -418,7 +450,7 @@ class Keyring(object):
|
||||
for response in responses:
|
||||
if (u"signatures" not in response
|
||||
or perspective_name not in response[u"signatures"]):
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Key response not signed by perspective server"
|
||||
" %r" % (perspective_name,)
|
||||
)
|
||||
@@ -441,21 +473,21 @@ class Keyring(object):
|
||||
list(response[u"signatures"][perspective_name]),
|
||||
list(perspective_keys)
|
||||
)
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Response not signed with a known key for perspective"
|
||||
" server %r" % (perspective_name,)
|
||||
)
|
||||
|
||||
processed_response = yield self.process_v2_response(
|
||||
perspective_name, response
|
||||
perspective_name, response, only_from_server=False
|
||||
)
|
||||
|
||||
for server_name, response_keys in processed_response.items():
|
||||
keys.setdefault(server_name, {}).update(response_keys)
|
||||
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
self.store_keys(
|
||||
preserve_fn(self.store_keys)(
|
||||
server_name=server_name,
|
||||
from_server=perspective_name,
|
||||
verify_keys=response_keys,
|
||||
@@ -463,7 +495,7 @@ class Keyring(object):
|
||||
for server_name, response_keys in keys.items()
|
||||
],
|
||||
consumeErrors=True
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
defer.returnValue(keys)
|
||||
|
||||
@@ -484,10 +516,10 @@ class Keyring(object):
|
||||
|
||||
if (u"signatures" not in response
|
||||
or server_name not in response[u"signatures"]):
|
||||
raise ValueError("Key response not signed by remote server")
|
||||
raise KeyLookupError("Key response not signed by remote server")
|
||||
|
||||
if "tls_fingerprints" not in response:
|
||||
raise ValueError("Key response missing TLS fingerprints")
|
||||
raise KeyLookupError("Key response missing TLS fingerprints")
|
||||
|
||||
certificate_bytes = crypto.dump_certificate(
|
||||
crypto.FILETYPE_ASN1, tls_certificate
|
||||
@@ -501,7 +533,7 @@ class Keyring(object):
|
||||
response_sha256_fingerprints.add(fingerprint[u"sha256"])
|
||||
|
||||
if sha256_fingerprint_b64 not in response_sha256_fingerprints:
|
||||
raise ValueError("TLS certificate not allowed by fingerprints")
|
||||
raise KeyLookupError("TLS certificate not allowed by fingerprints")
|
||||
|
||||
response_keys = yield self.process_v2_response(
|
||||
from_server=server_name,
|
||||
@@ -511,7 +543,7 @@ class Keyring(object):
|
||||
|
||||
keys.update(response_keys)
|
||||
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
preserve_fn(self.store_keys)(
|
||||
server_name=key_server_name,
|
||||
@@ -521,13 +553,13 @@ class Keyring(object):
|
||||
for key_server_name, verify_keys in keys.items()
|
||||
],
|
||||
consumeErrors=True
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
defer.returnValue(keys)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def process_v2_response(self, from_server, response_json,
|
||||
requested_ids=[]):
|
||||
requested_ids=[], only_from_server=True):
|
||||
time_now_ms = self.clock.time_msec()
|
||||
response_keys = {}
|
||||
verify_keys = {}
|
||||
@@ -551,9 +583,16 @@ class Keyring(object):
|
||||
|
||||
results = {}
|
||||
server_name = response_json["server_name"]
|
||||
if only_from_server:
|
||||
if server_name != from_server:
|
||||
raise KeyLookupError(
|
||||
"Expected a response for server %r not %r" % (
|
||||
from_server, server_name
|
||||
)
|
||||
)
|
||||
for key_id in response_json["signatures"].get(server_name, {}):
|
||||
if key_id not in response_json["verify_keys"]:
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Key response must include verification keys for all"
|
||||
" signatures"
|
||||
)
|
||||
@@ -580,7 +619,7 @@ class Keyring(object):
|
||||
response_keys.update(verify_keys)
|
||||
response_keys.update(old_verify_keys)
|
||||
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
preserve_fn(self.store.store_server_keys_json)(
|
||||
server_name=server_name,
|
||||
@@ -593,7 +632,7 @@ class Keyring(object):
|
||||
for key_id in updated_key_ids
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
results[server_name] = response_keys
|
||||
|
||||
@@ -621,15 +660,15 @@ class Keyring(object):
|
||||
|
||||
if ("signatures" not in response
|
||||
or server_name not in response["signatures"]):
|
||||
raise ValueError("Key response not signed by remote server")
|
||||
raise KeyLookupError("Key response not signed by remote server")
|
||||
|
||||
if "tls_certificate" not in response:
|
||||
raise ValueError("Key response missing TLS certificate")
|
||||
raise KeyLookupError("Key response missing TLS certificate")
|
||||
|
||||
tls_certificate_b64 = response["tls_certificate"]
|
||||
|
||||
if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
|
||||
raise ValueError("TLS certificate doesn't match")
|
||||
raise KeyLookupError("TLS certificate doesn't match")
|
||||
|
||||
# Cache the result in the datastore.
|
||||
|
||||
@@ -645,7 +684,7 @@ class Keyring(object):
|
||||
|
||||
for key_id in response["signatures"][server_name]:
|
||||
if key_id not in response["verify_keys"]:
|
||||
raise ValueError(
|
||||
raise KeyLookupError(
|
||||
"Key response must include verification keys for all"
|
||||
" signatures"
|
||||
)
|
||||
@@ -682,7 +721,7 @@ class Keyring(object):
|
||||
A deferred that completes when the keys are stored.
|
||||
"""
|
||||
# TODO(markjh): Store whether the keys have expired.
|
||||
yield defer.gatherResults(
|
||||
yield preserve_context_over_deferred(defer.gatherResults(
|
||||
[
|
||||
preserve_fn(self.store.store_server_verify_key)(
|
||||
server_name, server_name, key.time_added, key
|
||||
@@ -690,4 +729,4 @@ class Keyring(object):
|
||||
for key_id, key in verify_keys.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
678
synapse/event_auth.py
Normal file
@@ -0,0 +1,678 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.key import decode_verify_key_bytes
|
||||
from signedjson.sign import verify_signed_json, SignatureVerifyException
|
||||
from unpaddedbase64 import decode_base64
|
||||
|
||||
from synapse.api.constants import EventTypes, Membership, JoinRules
|
||||
from synapse.api.errors import AuthError, SynapseError, EventSizeError
|
||||
from synapse.types import UserID, get_domain_from_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def check(event, auth_events, do_sig_check=True, do_size_check=True):
|
||||
""" Checks if this event is correctly authed.
|
||||
|
||||
Args:
|
||||
event: the event being checked.
|
||||
auth_events (dict: event-key -> event): the existing room state.
|
||||
|
||||
|
||||
Returns:
|
||||
True if the auth checks pass.
|
||||
"""
|
||||
if do_size_check:
|
||||
_check_size_limits(event)
|
||||
|
||||
if not hasattr(event, "room_id"):
|
||||
raise AuthError(500, "Event has no room_id: %s" % event)
|
||||
|
||||
if do_sig_check:
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
event_id_domain = get_domain_from_id(event.event_id)
|
||||
|
||||
is_invite_via_3pid = (
|
||||
event.type == EventTypes.Member
|
||||
and event.membership == Membership.INVITE
|
||||
and "third_party_invite" in event.content
|
||||
)
|
||||
|
||||
# Check the sender's domain has signed the event
|
||||
if not event.signatures.get(sender_domain):
|
||||
# We allow invites via 3pid to have a sender from a different
|
||||
# HS, as the sender must match the sender of the original
|
||||
# 3pid invite. This is checked further down with the
|
||||
# other dedicated membership checks.
|
||||
if not is_invite_via_3pid:
|
||||
raise AuthError(403, "Event not signed by sender's server")
|
||||
|
||||
# Check the event_id's domain has signed the event
|
||||
if not event.signatures.get(event_id_domain):
|
||||
raise AuthError(403, "Event not signed by sending server")
|
||||
|
||||
if auth_events is None:
|
||||
# Oh, we don't know what the state of the room was, so we
|
||||
# are trusting that this is allowed (at least for now)
|
||||
logger.warn("Trusting event: %s", event.event_id)
|
||||
return True
|
||||
|
||||
if event.type == EventTypes.Create:
|
||||
room_id_domain = get_domain_from_id(event.room_id)
|
||||
if room_id_domain != sender_domain:
|
||||
raise AuthError(
|
||||
403,
|
||||
"Creation event's room_id domain does not match sender's"
|
||||
)
|
||||
# FIXME
|
||||
return True
|
||||
|
||||
creation_event = auth_events.get((EventTypes.Create, ""), None)
|
||||
|
||||
if not creation_event:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Room %r does not exist" % (event.room_id,)
|
||||
)
|
||||
|
||||
creating_domain = get_domain_from_id(event.room_id)
|
||||
originating_domain = get_domain_from_id(event.sender)
|
||||
if creating_domain != originating_domain:
|
||||
if not _can_federate(event, auth_events):
|
||||
raise AuthError(
|
||||
403,
|
||||
"This room has been marked as unfederatable."
|
||||
)
|
||||
|
||||
# FIXME: Temp hack
|
||||
if event.type == EventTypes.Aliases:
|
||||
if not event.is_state():
|
||||
raise AuthError(
|
||||
403,
|
||||
"Alias event must be a state event",
|
||||
)
|
||||
if not event.state_key:
|
||||
raise AuthError(
|
||||
403,
|
||||
"Alias event must have non-empty state_key"
|
||||
)
|
||||
sender_domain = get_domain_from_id(event.sender)
|
||||
if event.state_key != sender_domain:
|
||||
raise AuthError(
|
||||
403,
|
||||
"Alias event's state_key does not match sender's domain"
|
||||
)
|
||||
return True
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logger.debug(
|
||||
"Auth events: %s",
|
||||
[a.event_id for a in auth_events.values()]
|
||||
)
|
||||
|
||||
if event.type == EventTypes.Member:
|
||||
allowed = _is_membership_change_allowed(
|
||||
event, auth_events
|
||||
)
|
||||
if allowed:
|
||||
logger.debug("Allowing! %s", event)
|
||||
else:
|
||||
logger.debug("Denying! %s", event)
|
||||
return allowed
|
||||
|
||||
_check_event_sender_in_room(event, auth_events)
|
||||
|
||||
# Special case to allow m.room.third_party_invite events wherever
|
||||
# a user is allowed to issue invites. Fixes
|
||||
# https://github.com/vector-im/vector-web/issues/1208 hopefully
|
||||
if event.type == EventTypes.ThirdPartyInvite:
|
||||
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
invite_level = _get_named_level(auth_events, "invite", 0)
|
||||
|
||||
if user_level < invite_level:
|
||||
raise AuthError(
|
||||
403, (
|
||||
"You cannot issue a third party invite for %s." %
|
||||
(event.content.display_name,)
|
||||
)
|
||||
)
|
||||
else:
|
||||
return True
|
||||
|
||||
_can_send_event(event, auth_events)
|
||||
|
||||
if event.type == EventTypes.PowerLevels:
|
||||
_check_power_levels(event, auth_events)
|
||||
|
||||
if event.type == EventTypes.Redaction:
|
||||
check_redaction(event, auth_events)
|
||||
|
||||
logger.debug("Allowing! %s", event)
|
||||
|
||||
|
||||
def _check_size_limits(event):
|
||||
def too_big(field):
|
||||
raise EventSizeError("%s too large" % (field,))
|
||||
|
||||
if len(event.user_id) > 255:
|
||||
too_big("user_id")
|
||||
if len(event.room_id) > 255:
|
||||
too_big("room_id")
|
||||
if event.is_state() and len(event.state_key) > 255:
|
||||
too_big("state_key")
|
||||
if len(event.type) > 255:
|
||||
too_big("type")
|
||||
if len(event.event_id) > 255:
|
||||
too_big("event_id")
|
||||
if len(encode_canonical_json(event.get_pdu_json())) > 65536:
|
||||
too_big("event")
|
||||
|
||||
|
||||
def _can_federate(event, auth_events):
|
||||
creation_event = auth_events.get((EventTypes.Create, ""))
|
||||
|
||||
return creation_event.content.get("m.federate", True) is True
|
||||
|
||||
|
||||
def _is_membership_change_allowed(event, auth_events):
|
||||
membership = event.content["membership"]
|
||||
|
||||
# Check if this is the room creator joining:
|
||||
if len(event.prev_events) == 1 and Membership.JOIN == membership:
|
||||
# Get room creation event:
|
||||
key = (EventTypes.Create, "", )
|
||||
create = auth_events.get(key)
|
||||
if create and event.prev_events[0][0] == create.event_id:
|
||||
if create.content["creator"] == event.state_key:
|
||||
return True
|
||||
|
||||
target_user_id = event.state_key
|
||||
|
||||
creating_domain = get_domain_from_id(event.room_id)
|
||||
target_domain = get_domain_from_id(target_user_id)
|
||||
if creating_domain != target_domain:
|
||||
if not _can_federate(event, auth_events):
|
||||
raise AuthError(
|
||||
403,
|
||||
"This room has been marked as unfederatable."
|
||||
)
|
||||
|
||||
# get info about the caller
|
||||
key = (EventTypes.Member, event.user_id, )
|
||||
caller = auth_events.get(key)
|
||||
|
||||
caller_in_room = caller and caller.membership == Membership.JOIN
|
||||
caller_invited = caller and caller.membership == Membership.INVITE
|
||||
|
||||
# get info about the target
|
||||
key = (EventTypes.Member, target_user_id, )
|
||||
target = auth_events.get(key)
|
||||
|
||||
target_in_room = target and target.membership == Membership.JOIN
|
||||
target_banned = target and target.membership == Membership.BAN
|
||||
|
||||
key = (EventTypes.JoinRules, "", )
|
||||
join_rule_event = auth_events.get(key)
|
||||
if join_rule_event:
|
||||
join_rule = join_rule_event.content.get(
|
||||
"join_rule", JoinRules.INVITE
|
||||
)
|
||||
else:
|
||||
join_rule = JoinRules.INVITE
|
||||
|
||||
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
target_level = get_user_power_level(
|
||||
target_user_id, auth_events
|
||||
)
|
||||
|
||||
# FIXME (erikj): What should we do here as the default?
|
||||
ban_level = _get_named_level(auth_events, "ban", 50)
|
||||
|
||||
logger.debug(
|
||||
"_is_membership_change_allowed: %s",
|
||||
{
|
||||
"caller_in_room": caller_in_room,
|
||||
"caller_invited": caller_invited,
|
||||
"target_banned": target_banned,
|
||||
"target_in_room": target_in_room,
|
||||
"membership": membership,
|
||||
"join_rule": join_rule,
|
||||
"target_user_id": target_user_id,
|
||||
"event.user_id": event.user_id,
|
||||
}
|
||||
)
|
||||
|
||||
if Membership.INVITE == membership and "third_party_invite" in event.content:
|
||||
if not _verify_third_party_invite(event, auth_events):
|
||||
raise AuthError(403, "You are not invited to this room.")
|
||||
if target_banned:
|
||||
raise AuthError(
|
||||
403, "%s is banned from the room" % (target_user_id,)
|
||||
)
|
||||
return True
|
||||
|
||||
if Membership.JOIN != membership:
|
||||
if (caller_invited
|
||||
and Membership.LEAVE == membership
|
||||
and target_user_id == event.user_id):
|
||||
return True
|
||||
|
||||
if not caller_in_room: # caller isn't joined
|
||||
raise AuthError(
|
||||
403,
|
||||
"%s not in room %s." % (event.user_id, event.room_id,)
|
||||
)
|
||||
|
||||
if Membership.INVITE == membership:
|
||||
# TODO (erikj): We should probably handle this more intelligently
|
||||
# PRIVATE join rules.
|
||||
|
||||
# Invites are valid iff caller is in the room and target isn't.
|
||||
if target_banned:
|
||||
raise AuthError(
|
||||
403, "%s is banned from the room" % (target_user_id,)
|
||||
)
|
||||
elif target_in_room: # the target is already in the room.
|
||||
raise AuthError(403, "%s is already in the room." %
|
||||
target_user_id)
|
||||
else:
|
||||
invite_level = _get_named_level(auth_events, "invite", 0)
|
||||
|
||||
if user_level < invite_level:
|
||||
raise AuthError(
|
||||
403, "You cannot invite user %s." % target_user_id
|
||||
)
|
||||
elif Membership.JOIN == membership:
|
||||
# Joins are valid iff caller == target and they were:
|
||||
# invited: They are accepting the invitation
|
||||
# joined: It's a NOOP
|
||||
if event.user_id != target_user_id:
|
||||
raise AuthError(403, "Cannot force another user to join.")
|
||||
elif target_banned:
|
||||
raise AuthError(403, "You are banned from this room")
|
||||
elif join_rule == JoinRules.PUBLIC:
|
||||
pass
|
||||
elif join_rule == JoinRules.INVITE:
|
||||
if not caller_in_room and not caller_invited:
|
||||
raise AuthError(403, "You are not invited to this room.")
|
||||
else:
|
||||
# TODO (erikj): may_join list
|
||||
# TODO (erikj): private rooms
|
||||
raise AuthError(403, "You are not allowed to join this room")
|
||||
elif Membership.LEAVE == membership:
|
||||
# TODO (erikj): Implement kicks.
|
||||
if target_banned and user_level < ban_level:
|
||||
raise AuthError(
|
||||
403, "You cannot unban user &s." % (target_user_id,)
|
||||
)
|
||||
elif target_user_id != event.user_id:
|
||||
kick_level = _get_named_level(auth_events, "kick", 50)
|
||||
|
||||
if user_level < kick_level or user_level <= target_level:
|
||||
raise AuthError(
|
||||
403, "You cannot kick user %s." % target_user_id
|
||||
)
|
||||
elif Membership.BAN == membership:
|
||||
if user_level < ban_level or user_level <= target_level:
|
||||
raise AuthError(403, "You don't have permission to ban")
|
||||
else:
|
||||
raise AuthError(500, "Unknown membership %s" % membership)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def _check_event_sender_in_room(event, auth_events):
|
||||
key = (EventTypes.Member, event.user_id, )
|
||||
member_event = auth_events.get(key)
|
||||
|
||||
return _check_joined_room(
|
||||
member_event,
|
||||
event.user_id,
|
||||
event.room_id
|
||||
)
|
||||
|
||||
|
||||
def _check_joined_room(member, user_id, room_id):
|
||||
if not member or member.membership != Membership.JOIN:
|
||||
raise AuthError(403, "User %s not in room %s (%s)" % (
|
||||
user_id, room_id, repr(member)
|
||||
))
|
||||
|
||||
|
||||
def get_send_level(etype, state_key, auth_events):
|
||||
key = (EventTypes.PowerLevels, "", )
|
||||
send_level_event = auth_events.get(key)
|
||||
send_level = None
|
||||
if send_level_event:
|
||||
send_level = send_level_event.content.get("events", {}).get(
|
||||
etype
|
||||
)
|
||||
if send_level is None:
|
||||
if state_key is not None:
|
||||
send_level = send_level_event.content.get(
|
||||
"state_default", 50
|
||||
)
|
||||
else:
|
||||
send_level = send_level_event.content.get(
|
||||
"events_default", 0
|
||||
)
|
||||
|
||||
if send_level:
|
||||
send_level = int(send_level)
|
||||
else:
|
||||
send_level = 0
|
||||
|
||||
return send_level
|
||||
|
||||
|
||||
def _can_send_event(event, auth_events):
|
||||
send_level = get_send_level(
|
||||
event.type, event.get("state_key", None), auth_events
|
||||
)
|
||||
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
|
||||
if user_level < send_level:
|
||||
raise AuthError(
|
||||
403,
|
||||
"You don't have permission to post that to the room. " +
|
||||
"user_level (%d) < send_level (%d)" % (user_level, send_level)
|
||||
)
|
||||
|
||||
# Check state_key
|
||||
if hasattr(event, "state_key"):
|
||||
if event.state_key.startswith("@"):
|
||||
if event.state_key != event.user_id:
|
||||
raise AuthError(
|
||||
403,
|
||||
"You are not allowed to set others state"
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def check_redaction(event, auth_events):
    """Check whether the event sender is allowed to redact the target event.

    Returns:
        True if the sender is allowed to redact the target event if the
        target event was created by them.
        False if the sender is allowed to redact the target event with no
        further checks.

    Raises:
        AuthError if the event sender is definitely not allowed to redact
        the target event.
    """
    user_level = get_user_power_level(event.user_id, auth_events)

    redact_level = _get_named_level(auth_events, "redact", 50)

    if user_level >= redact_level:
        return False

    redacter_domain = get_domain_from_id(event.event_id)
    redactee_domain = get_domain_from_id(event.redacts)
    if redacter_domain == redactee_domain:
        return True

    raise AuthError(
        403,
        "You don't have permission to redact events"
    )
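# A self-contained sketch of the end-to-end rule that check_redaction and its
# caller enforce together, simplified to plain dicts (the real function only
# compares the sender and target homeserver domains and leaves the ownership
# check to its caller; the ids and levels below are illustrative):
def _redaction_allowed(sender_level, redact_level, redaction_sender, target_sender):
    if sender_level >= redact_level:
        return True                     # enough power: allowed outright
    # Otherwise only redacting one's own events is permitted.
    return redaction_sender == target_sender

assert _redaction_allowed(100, 50, "@mod:example.org", "@spammer:example.org")
assert _redaction_allowed(0, 50, "@alice:example.org", "@alice:example.org")
assert not _redaction_allowed(0, 50, "@alice:example.org", "@bob:example.org")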
|
||||
|
||||
|
||||
def _check_power_levels(event, auth_events):
|
||||
user_list = event.content.get("users", {})
|
||||
# Validate users
|
||||
for k, v in user_list.items():
|
||||
try:
|
||||
UserID.from_string(k)
|
||||
except:
|
||||
raise SynapseError(400, "Not a valid user_id: %s" % (k,))
|
||||
|
||||
try:
|
||||
int(v)
|
||||
except:
|
||||
raise SynapseError(400, "Not a valid power level: %s" % (v,))
|
||||
|
||||
key = (event.type, event.state_key, )
|
||||
current_state = auth_events.get(key)
|
||||
|
||||
if not current_state:
|
||||
return
|
||||
|
||||
user_level = get_user_power_level(event.user_id, auth_events)
|
||||
|
||||
# Check other levels:
|
||||
levels_to_check = [
|
||||
("users_default", None),
|
||||
("events_default", None),
|
||||
("state_default", None),
|
||||
("ban", None),
|
||||
("redact", None),
|
||||
("kick", None),
|
||||
("invite", None),
|
||||
]
|
||||
|
||||
old_list = current_state.content.get("users")
|
||||
for user in set(old_list.keys() + user_list.keys()):
|
||||
levels_to_check.append(
|
||||
(user, "users")
|
||||
)
|
||||
|
||||
old_list = current_state.content.get("events")
|
||||
new_list = event.content.get("events")
|
||||
for ev_id in set(old_list.keys() + new_list.keys()):
|
||||
levels_to_check.append(
|
||||
(ev_id, "events")
|
||||
)
|
||||
|
||||
old_state = current_state.content
|
||||
new_state = event.content
|
||||
|
||||
for level_to_check, dir in levels_to_check:
|
||||
old_loc = old_state
|
||||
new_loc = new_state
|
||||
if dir:
|
||||
old_loc = old_loc.get(dir, {})
|
||||
new_loc = new_loc.get(dir, {})
|
||||
|
||||
if level_to_check in old_loc:
|
||||
old_level = int(old_loc[level_to_check])
|
||||
else:
|
||||
old_level = None
|
||||
|
||||
if level_to_check in new_loc:
|
||||
new_level = int(new_loc[level_to_check])
|
||||
else:
|
||||
new_level = None
|
||||
|
||||
if new_level is not None and old_level is not None:
|
||||
if new_level == old_level:
|
||||
continue
|
||||
|
||||
if dir == "users" and level_to_check != event.user_id:
|
||||
if old_level == user_level:
|
||||
raise AuthError(
|
||||
403,
|
||||
"You don't have permission to remove ops level equal "
|
||||
"to your own"
|
||||
)
|
||||
|
||||
if old_level > user_level or new_level > user_level:
|
||||
raise AuthError(
|
||||
403,
|
||||
"You don't have permission to add ops level greater "
|
||||
"than your own"
|
||||
)
|
||||
|
||||
|
||||
def _get_power_level_event(auth_events):
    key = (EventTypes.PowerLevels, "", )
    return auth_events.get(key)


def get_user_power_level(user_id, auth_events):
    power_level_event = _get_power_level_event(auth_events)

    if power_level_event:
        level = power_level_event.content.get("users", {}).get(user_id)
        if not level:
            level = power_level_event.content.get("users_default", 0)

        if level is None:
            return 0
        else:
            return int(level)
    else:
        key = (EventTypes.Create, "", )
        create_event = auth_events.get(key)
        if (create_event is not None and
                create_event.content["creator"] == user_id):
            return 100
        else:
            return 0
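# Usage sketch of the lookup above, assuming a bare power-levels content dict
# rather than a real event (values are illustrative):
def _user_level_from_content(content, user_id):
    level = content.get("users", {}).get(user_id)
    if not level:
        level = content.get("users_default", 0)
    return int(level) if level is not None else 0

_pl = {"users": {"@admin:example.org": 100}, "users_default": 5}
assert _user_level_from_content(_pl, "@admin:example.org") == 100
assert _user_level_from_content(_pl, "@newcomer:example.org") == 5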
|
||||
|
||||
|
||||
def _get_named_level(auth_events, name, default):
    power_level_event = _get_power_level_event(auth_events)

    if not power_level_event:
        return default

    level = power_level_event.content.get(name, None)
    if level is not None:
        return int(level)
    else:
        return default
|
||||
|
||||
|
||||
def _verify_third_party_invite(event, auth_events):
|
||||
"""
|
||||
Validates that the invite event is authorized by a previous third-party invite.
|
||||
|
||||
Checks that the public key, and keyserver, match those in the third party invite,
|
||||
and that the invite event has a signature issued using that public key.
|
||||
|
||||
Args:
|
||||
event: The m.room.member join event being validated.
|
||||
auth_events: All relevant previous context events which may be used
|
||||
for authorization decisions.
|
||||
|
||||
Return:
|
||||
True if the event fulfills the expectations of a previous third party
|
||||
invite event.
|
||||
"""
|
||||
if "third_party_invite" not in event.content:
|
||||
return False
|
||||
if "signed" not in event.content["third_party_invite"]:
|
||||
return False
|
||||
signed = event.content["third_party_invite"]["signed"]
|
||||
for key in {"mxid", "token"}:
|
||||
if key not in signed:
|
||||
return False
|
||||
|
||||
token = signed["token"]
|
||||
|
||||
invite_event = auth_events.get(
|
||||
(EventTypes.ThirdPartyInvite, token,)
|
||||
)
|
||||
if not invite_event:
|
||||
return False
|
||||
|
||||
if invite_event.sender != event.sender:
|
||||
return False
|
||||
|
||||
if event.user_id != invite_event.user_id:
|
||||
return False
|
||||
|
||||
if signed["mxid"] != event.state_key:
|
||||
return False
|
||||
if signed["token"] != token:
|
||||
return False
|
||||
|
||||
for public_key_object in get_public_keys(invite_event):
|
||||
public_key = public_key_object["public_key"]
|
||||
try:
|
||||
for server, signature_block in signed["signatures"].items():
|
||||
for key_name, encoded_signature in signature_block.items():
|
||||
if not key_name.startswith("ed25519:"):
|
||||
continue
|
||||
verify_key = decode_verify_key_bytes(
|
||||
key_name,
|
||||
decode_base64(public_key)
|
||||
)
|
||||
verify_signed_json(signed, server, verify_key)
|
||||
|
||||
# We got the public key from the invite, so we know that the
|
||||
# correct server signed the signed bundle.
|
||||
# The caller is responsible for checking that the signing
|
||||
# server has not revoked that public key.
|
||||
return True
|
||||
except (KeyError, SignatureVerifyException,):
|
||||
continue
|
||||
return False
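# For reference, a hedged sketch of the membership event content this check
# expects; the ids, token and signature values below are made up:
_example_invite_content = {
    "membership": "invite",
    "third_party_invite": {
        "signed": {
            "mxid": "@invitee:example.org",
            "token": "sometoken",
            "signatures": {
                "identity.example.org": {
                    "ed25519:0": "<base64 signature>",
                },
            },
        },
    },
}
# The "token" selects the earlier m.room.third_party_invite event from
# auth_events; the public key(s) advertised by that event must verify the
# "signed" block, and "mxid" must match the invite's state_key.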
|
||||
|
||||
|
||||
def get_public_keys(invite_event):
    public_keys = []
    if "public_key" in invite_event.content:
        o = {
            "public_key": invite_event.content["public_key"],
        }
        if "key_validity_url" in invite_event.content:
            o["key_validity_url"] = invite_event.content["key_validity_url"]
        public_keys.append(o)
    public_keys.extend(invite_event.content.get("public_keys", []))
    return public_keys
|
||||
|
||||
|
||||
def auth_types_for_event(event):
    """Given an event, return a list of (EventType, StateKey) that may be
    needed to auth the event. The returned list may be a superset of what
    would actually be required depending on the full state of the room.

    Used to limit the number of events to fetch from the database to
    actually auth the event.
    """
    if event.type == EventTypes.Create:
        return []

    auth_types = []

    auth_types.append((EventTypes.PowerLevels, "", ))
    auth_types.append((EventTypes.Member, event.user_id, ))
    auth_types.append((EventTypes.Create, "", ))

    if event.type == EventTypes.Member:
        membership = event.content["membership"]
        if membership in [Membership.JOIN, Membership.INVITE]:
            auth_types.append((EventTypes.JoinRules, "", ))

        auth_types.append((EventTypes.Member, event.state_key, ))

        if membership == Membership.INVITE:
            if "third_party_invite" in event.content:
                key = (
                    EventTypes.ThirdPartyInvite,
                    event.content["third_party_invite"]["signed"]["token"]
                )
                auth_types.append(key)

    return auth_types
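# A worked example of the list computed above for an ordinary join event,
# written out with the literal Matrix type names (the user id is made up and
# the sender is also the join target, hence the repeated member entry):
_example_join_auth_types = [
    ("m.room.power_levels", ""),
    ("m.room.member", "@joiner:example.org"),   # the sender's own membership
    ("m.room.create", ""),
    ("m.room.join_rules", ""),                  # joins and invites also need join rules
    ("m.room.member", "@joiner:example.org"),   # the target's membership (state_key)
]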
|
||||
@@ -36,6 +36,15 @@ class _EventInternalMetadata(object):
|
||||
def is_invite_from_remote(self):
|
||||
return getattr(self, "invite_from_remote", False)
|
||||
|
||||
def get_send_on_behalf_of(self):
|
||||
"""Whether this server should send the event on behalf of another server.
|
||||
This is used by the federation "send_join" API to forward the initial join
|
||||
event for a server in the room.
|
||||
|
||||
returns a str with the name of the server this event is sent on behalf of.
|
||||
"""
|
||||
return getattr(self, "send_on_behalf_of", None)
|
||||
|
||||
|
||||
def _event_dict_property(key):
|
||||
def getter(self):
|
||||
@@ -70,7 +79,6 @@ class EventBase(object):
|
||||
auth_events = _event_dict_property("auth_events")
|
||||
depth = _event_dict_property("depth")
|
||||
content = _event_dict_property("content")
|
||||
event_id = _event_dict_property("event_id")
|
||||
hashes = _event_dict_property("hashes")
|
||||
origin = _event_dict_property("origin")
|
||||
origin_server_ts = _event_dict_property("origin_server_ts")
|
||||
@@ -79,8 +87,6 @@ class EventBase(object):
|
||||
redacts = _event_dict_property("redacts")
|
||||
room_id = _event_dict_property("room_id")
|
||||
sender = _event_dict_property("sender")
|
||||
state_key = _event_dict_property("state_key")
|
||||
type = _event_dict_property("type")
|
||||
user_id = _event_dict_property("sender")
|
||||
|
||||
@property
|
||||
@@ -99,7 +105,7 @@ class EventBase(object):
|
||||
|
||||
return d
|
||||
|
||||
def get(self, key, default):
|
||||
def get(self, key, default=None):
|
||||
return self._event_dict.get(key, default)
|
||||
|
||||
def get_internal_metadata_dict(self):
|
||||
@@ -153,6 +159,11 @@ class FrozenEvent(EventBase):
|
||||
else:
|
||||
frozen_dict = event_dict
|
||||
|
||||
self.event_id = event_dict["event_id"]
|
||||
self.type = event_dict["type"]
|
||||
if "state_key" in event_dict:
|
||||
self.state_key = event_dict["state_key"]
|
||||
|
||||
super(FrozenEvent, self).__init__(
|
||||
frozen_dict,
|
||||
signatures=signatures,
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from . import EventBase, FrozenEvent
|
||||
from . import EventBase, FrozenEvent, _event_dict_property
|
||||
|
||||
from synapse.types import EventID
|
||||
|
||||
@@ -34,6 +34,10 @@ class EventBuilder(EventBase):
|
||||
internal_metadata_dict=internal_metadata_dict,
|
||||
)
|
||||
|
||||
event_id = _event_dict_property("event_id")
|
||||
state_key = _event_dict_property("state_key")
|
||||
type = _event_dict_property("type")
|
||||
|
||||
def build(self):
|
||||
return FrozenEvent.from_event(self)
|
||||
|
||||
|
||||
@@ -15,9 +15,56 @@
|
||||
|
||||
|
||||
class EventContext(object):
|
||||
"""
|
||||
Attributes:
|
||||
current_state_ids (dict[(str, str), str]):
|
||||
The current state map including the current event.
|
||||
(type, state_key) -> event_id
|
||||
|
||||
def __init__(self, current_state=None):
|
||||
self.current_state = current_state
|
||||
prev_state_ids (dict[(str, str), str]):
|
||||
The current state map excluding the current event.
|
||||
(type, state_key) -> event_id
|
||||
|
||||
state_group (int): state group id
|
||||
rejected (bool|str): A rejection reason if the event was rejected, else
|
||||
False
|
||||
|
||||
push_actions (list[(str, list[object])]): list of (user_id, actions)
|
||||
tuples
|
||||
|
||||
prev_group (int): Previously persisted state group. ``None`` for an
|
||||
outlier.
|
||||
delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
|
||||
(type, state_key) -> event_id. ``None`` for an outlier.
|
||||
|
||||
prev_state_events (?): XXX: is this ever set to anything other than
|
||||
the empty list?
|
||||
"""
|
||||
|
||||
__slots__ = [
|
||||
"current_state_ids",
|
||||
"prev_state_ids",
|
||||
"state_group",
|
||||
"rejected",
|
||||
"push_actions",
|
||||
"prev_group",
|
||||
"delta_ids",
|
||||
"prev_state_events",
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
# The current state including the current event
|
||||
self.current_state_ids = None
|
||||
# The current state excluding the current event
|
||||
self.prev_state_ids = None
|
||||
self.state_group = None
|
||||
|
||||
self.rejected = False
|
||||
self.push_actions = []
|
||||
|
||||
# A previously persisted state group and a delta between that
|
||||
# and this state.
|
||||
self.prev_group = None
|
||||
self.delta_ids = None
|
||||
|
||||
self.prev_state_events = None
|
||||
|
||||
@@ -16,6 +16,17 @@
|
||||
from synapse.api.constants import EventTypes
|
||||
from . import EventBase
|
||||
|
||||
from frozendict import frozendict
|
||||
|
||||
import re
|
||||
|
||||
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
|
||||
# (?<!stuff) matches if the current position in the string is not preceded
|
||||
# by a match for 'stuff'.
|
||||
# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as
|
||||
# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
|
||||
SPLIT_FIELD_REGEX = re.compile(r'(?<!\\)\.')
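# A quick, standalone demonstration of the split behaviour (and of the caveat
# noted in the TODO above), using the same pattern:
import re
_pattern = re.compile(r'(?<!\\)\.')
assert _pattern.split(r"content.body") == ["content", "body"]
assert _pattern.split(r"content.thing\.with\.dots") == ["content", r"thing\.with\.dots"]
# r"foo\\.bar" is not split, because the '.' is still preceded by a backslash:
assert _pattern.split(r"foo\\.bar") == [r"foo\\.bar"]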
|
||||
|
||||
|
||||
def prune_event(event):
|
||||
""" Returns a pruned version of the given event, which removes all keys we
|
||||
@@ -88,6 +99,8 @@ def prune_event(event):
|
||||
|
||||
if "age_ts" in event.unsigned:
|
||||
allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
|
||||
if "replaces_state" in event.unsigned:
|
||||
allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]
|
||||
|
||||
return type(event)(
|
||||
allowed_fields,
|
||||
@@ -95,6 +108,83 @@ def prune_event(event):
|
||||
)
|
||||
|
||||
|
||||
def _copy_field(src, dst, field):
|
||||
"""Copy the field in 'src' to 'dst'.
|
||||
|
||||
For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
|
||||
then dst={"foo":{"bar":5}}.
|
||||
|
||||
Args:
|
||||
src(dict): The dict to read from.
|
||||
dst(dict): The dict to modify.
|
||||
field(list<str>): List of keys to drill down to in 'src'.
|
||||
"""
|
||||
if len(field) == 0: # this should be impossible
|
||||
return
|
||||
if len(field) == 1: # common case e.g. 'origin_server_ts'
|
||||
if field[0] in src:
|
||||
dst[field[0]] = src[field[0]]
|
||||
return
|
||||
|
||||
# Else is a nested field e.g. 'content.body'
|
||||
# Pop the last field as that's the key to move across and we need the
|
||||
# parent dict in order to access the data. Drill down to the right dict.
|
||||
key_to_move = field.pop(-1)
|
||||
sub_dict = src
|
||||
for sub_field in field: # e.g. sub_field => "content"
|
||||
if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
|
||||
sub_dict = sub_dict[sub_field]
|
||||
else:
|
||||
return
|
||||
|
||||
if key_to_move not in sub_dict:
|
||||
return
|
||||
|
||||
# Insert the key into the output dictionary, creating nested objects
|
||||
# as required. We couldn't do this any earlier or else we'd need to delete
|
||||
# the empty objects if the key didn't exist.
|
||||
sub_out_dict = dst
|
||||
for sub_field in field:
|
||||
sub_out_dict = sub_out_dict.setdefault(sub_field, {})
|
||||
sub_out_dict[key_to_move] = sub_dict[key_to_move]
|
||||
|
||||
|
||||
def only_fields(dictionary, fields):
|
||||
"""Return a new dict with only the fields in 'dictionary' which are present
|
||||
in 'fields'.
|
||||
|
||||
If there are no event fields specified then all fields are included.
|
||||
The entries may include '.' characters to indicate sub-fields.
|
||||
So ['content.body'] will include the 'body' field of the 'content' object.
|
||||
A literal '.' character in a field name may be escaped using a '\'.
|
||||
|
||||
Args:
|
||||
dictionary(dict): The dictionary to read from.
|
||||
fields(list<str>): A list of fields to copy over. Only shallow refs are
|
||||
taken.
|
||||
Returns:
|
||||
dict: A new dictionary with only the given fields. If fields was empty,
|
||||
the same dictionary is returned.
|
||||
"""
|
||||
if len(fields) == 0:
|
||||
return dictionary
|
||||
|
||||
# for each field, convert it:
|
||||
# ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
|
||||
split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]
|
||||
|
||||
# for each element of the output array of arrays:
|
||||
# remove escaping so we can use the right key names.
|
||||
split_fields[:] = [
|
||||
[f.replace(r'\.', r'.') for f in field_array] for field_array in split_fields
|
||||
]
|
||||
|
||||
output = {}
|
||||
for field_array in split_fields:
|
||||
_copy_field(dictionary, output, field_array)
|
||||
return output
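# Usage sketch, runnable in the context of this module, on a plain nested
# dict standing in for a serialised event (field values are illustrative):
example = only_fields(
    {"type": "m.room.message",
     "content": {"body": "hi", "formatted_body": "<b>hi</b>"}},
    ["content.body", "type"],
)
assert example == {"content": {"body": "hi"}, "type": "m.room.message"}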
|
||||
|
||||
|
||||
def format_event_raw(d):
|
||||
return d
|
||||
|
||||
@@ -135,7 +225,7 @@ def format_event_for_client_v2_without_room_id(d):
|
||||
|
||||
def serialize_event(e, time_now_ms, as_client_event=True,
|
||||
event_format=format_event_for_client_v1,
|
||||
token_id=None):
|
||||
token_id=None, only_event_fields=None):
|
||||
# FIXME(erikj): To handle the case of presence events and the like
|
||||
if not isinstance(e, EventBase):
|
||||
return e
|
||||
@@ -162,6 +252,12 @@ def serialize_event(e, time_now_ms, as_client_event=True,
|
||||
d["unsigned"]["transaction_id"] = txn_id
|
||||
|
||||
if as_client_event:
|
||||
return event_format(d)
|
||||
else:
|
||||
return d
|
||||
d = event_format(d)
|
||||
|
||||
if only_event_fields:
|
||||
if (not isinstance(only_event_fields, list) or
|
||||
not all(isinstance(f, basestring) for f in only_event_fields)):
|
||||
raise TypeError("only_event_fields must be a list of strings")
|
||||
d = only_fields(d, only_event_fields)
|
||||
|
||||
return d
|
||||
|
||||
@@ -17,10 +17,9 @@
|
||||
"""
|
||||
|
||||
from .replication import ReplicationLayer
|
||||
from .transport.client import TransportLayerClient
|
||||
|
||||
|
||||
def initialize_http_replication(homeserver):
|
||||
transport = TransportLayerClient(homeserver)
|
||||
def initialize_http_replication(hs):
|
||||
transport = hs.get_federation_transport_client()
|
||||
|
||||
return ReplicationLayer(homeserver, transport)
|
||||
return ReplicationLayer(hs, transport)
|
||||
|
||||
@@ -23,6 +23,7 @@ from synapse.crypto.event_signing import check_event_content_hash
|
||||
from synapse.api.errors import SynapseError
|
||||
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
|
||||
|
||||
import logging
|
||||
|
||||
@@ -31,6 +32,9 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FederationBase(object):
|
||||
def __init__(self, hs):
|
||||
pass
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
|
||||
include_none=False):
|
||||
@@ -99,10 +103,10 @@ class FederationBase(object):
|
||||
warn, pdu
|
||||
)
|
||||
|
||||
valid_pdus = yield defer.gatherResults(
|
||||
valid_pdus = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
deferreds,
|
||||
consumeErrors=True
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
if include_none:
|
||||
defer.returnValue(valid_pdus)
|
||||
@@ -126,7 +130,7 @@ class FederationBase(object):
|
||||
for pdu in pdus
|
||||
]
|
||||
|
||||
deferreds = self.keyring.verify_json_objects_for_server([
|
||||
deferreds = preserve_fn(self.keyring.verify_json_objects_for_server)([
|
||||
(p.origin, p.get_pdu_json())
|
||||
for p in redacted_pdus
|
||||
])
|
||||
|
||||
@@ -18,7 +18,6 @@ from twisted.internet import defer
|
||||
|
||||
from .federation_base import FederationBase
|
||||
from synapse.api.constants import Membership
|
||||
from .units import Edu
|
||||
|
||||
from synapse.api.errors import (
|
||||
CodeMessageException, HttpResponseException, SynapseError,
|
||||
@@ -26,10 +25,11 @@ from synapse.api.errors import (
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.events import FrozenEvent
|
||||
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
|
||||
from synapse.events import FrozenEvent, builder
|
||||
import synapse.metrics
|
||||
|
||||
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
import copy
|
||||
import itertools
|
||||
@@ -43,14 +43,37 @@ logger = logging.getLogger(__name__)
|
||||
# synapse.federation.federation_client is a silly name
|
||||
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
|
||||
|
||||
sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")
|
||||
|
||||
sent_edus_counter = metrics.register_counter("sent_edus")
|
||||
|
||||
sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
|
||||
|
||||
|
||||
PDU_RETRY_TIME_MS = 1 * 60 * 1000
|
||||
|
||||
|
||||
class FederationClient(FederationBase):
|
||||
def __init__(self, hs):
|
||||
super(FederationClient, self).__init__(hs)
|
||||
|
||||
self.pdu_destination_tried = {}
|
||||
self._clock.looping_call(
|
||||
self._clear_tried_cache, 60 * 1000,
|
||||
)
|
||||
self.state = hs.get_state_handler()
|
||||
|
||||
def _clear_tried_cache(self):
|
||||
"""Clear pdu_destination_tried cache"""
|
||||
now = self._clock.time_msec()
|
||||
|
||||
old_dict = self.pdu_destination_tried
|
||||
self.pdu_destination_tried = {}
|
||||
|
||||
for event_id, destination_dict in old_dict.items():
|
||||
destination_dict = {
|
||||
dest: time
|
||||
for dest, time in destination_dict.items()
|
||||
if time + PDU_RETRY_TIME_MS > now
|
||||
}
|
||||
if destination_dict:
|
||||
self.pdu_destination_tried[event_id] = destination_dict
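# A standalone sketch of the pruning pattern used above: drop per-destination
# retry timestamps that have fallen outside the retry window (names and the
# window length are illustrative):
def _prune_tried(tried, now_ms, window_ms=60 * 1000):
    pruned = {}
    for event_id, dests in tried.items():
        kept = {d: t for d, t in dests.items() if t + window_ms > now_ms}
        if kept:
            pruned[event_id] = kept
    return pruned

assert _prune_tried({"$stale:hs": {"remote.org": 0}}, now_ms=120 * 1000) == {}
assert _prune_tried({"$fresh:hs": {"remote.org": 100 * 1000}}, now_ms=120 * 1000) == {
    "$fresh:hs": {"remote.org": 100 * 1000}
}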
|
||||
|
||||
def start_get_pdu_cache(self):
|
||||
self._get_pdu_cache = ExpiringCache(
|
||||
@@ -63,58 +86,9 @@ class FederationClient(FederationBase):
|
||||
|
||||
self._get_pdu_cache.start()
|
||||
|
||||
@log_function
|
||||
def send_pdu(self, pdu, destinations):
|
||||
"""Informs the replication layer about a new PDU generated within the
|
||||
home server that should be transmitted to others.
|
||||
|
||||
TODO: Figure out when we should actually resolve the deferred.
|
||||
|
||||
Args:
|
||||
pdu (Pdu): The new Pdu.
|
||||
|
||||
Returns:
|
||||
Deferred: Completes when we have successfully processed the PDU
|
||||
and replicated it to any interested remote home servers.
|
||||
"""
|
||||
order = self._order
|
||||
self._order += 1
|
||||
|
||||
sent_pdus_destination_dist.inc_by(len(destinations))
|
||||
|
||||
logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)
|
||||
|
||||
# TODO, add errback, etc.
|
||||
self._transaction_queue.enqueue_pdu(pdu, destinations, order)
|
||||
|
||||
logger.debug(
|
||||
"[%s] transaction_layer.enqueue_pdu... done",
|
||||
pdu.event_id
|
||||
)
|
||||
|
||||
@log_function
|
||||
def send_edu(self, destination, edu_type, content):
|
||||
edu = Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type=edu_type,
|
||||
content=content,
|
||||
)
|
||||
|
||||
sent_edus_counter.inc()
|
||||
|
||||
# TODO, add errback, etc.
|
||||
self._transaction_queue.enqueue_edu(edu)
|
||||
return defer.succeed(None)
|
||||
|
||||
@log_function
|
||||
def send_failure(self, failure, destination):
|
||||
self._transaction_queue.enqueue_failure(failure, destination)
|
||||
return defer.succeed(None)
|
||||
|
||||
@log_function
|
||||
def make_query(self, destination, query_type, args,
|
||||
retry_on_dns_fail=False):
|
||||
retry_on_dns_fail=False, ignore_backoff=False):
|
||||
"""Sends a federation Query to a remote homeserver of the given type
|
||||
and arguments.
|
||||
|
||||
@@ -124,6 +98,8 @@ class FederationClient(FederationBase):
|
||||
handler name used in register_query_handler().
|
||||
args (dict): Mapping of strings to strings containing the details
|
||||
of the query request.
|
||||
ignore_backoff (bool): true to ignore the historical backoff data
|
||||
and try the request anyway.
|
||||
|
||||
Returns:
|
||||
a Deferred which will eventually yield a JSON object from the
|
||||
@@ -132,11 +108,12 @@ class FederationClient(FederationBase):
|
||||
sent_queries_counter.inc(query_type)
|
||||
|
||||
return self.transport_layer.make_query(
|
||||
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
|
||||
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail,
|
||||
ignore_backoff=ignore_backoff,
|
||||
)
|
||||
|
||||
@log_function
|
||||
def query_client_keys(self, destination, content):
|
||||
def query_client_keys(self, destination, content, timeout):
|
||||
"""Query device keys for a device hosted on a remote server.
|
||||
|
||||
Args:
|
||||
@@ -148,10 +125,22 @@ class FederationClient(FederationBase):
|
||||
response
|
||||
"""
|
||||
sent_queries_counter.inc("client_device_keys")
|
||||
return self.transport_layer.query_client_keys(destination, content)
|
||||
return self.transport_layer.query_client_keys(
|
||||
destination, content, timeout
|
||||
)
|
||||
|
||||
@log_function
|
||||
def claim_client_keys(self, destination, content):
|
||||
def query_user_devices(self, destination, user_id, timeout=30000):
|
||||
"""Query the device keys for a list of user ids hosted on a remote
|
||||
server.
|
||||
"""
|
||||
sent_queries_counter.inc("user_devices")
|
||||
return self.transport_layer.query_user_devices(
|
||||
destination, user_id, timeout
|
||||
)
|
||||
|
||||
@log_function
|
||||
def claim_client_keys(self, destination, content, timeout):
|
||||
"""Claims one-time keys for a device hosted on a remote server.
|
||||
|
||||
Args:
|
||||
@@ -163,7 +152,9 @@ class FederationClient(FederationBase):
|
||||
response
|
||||
"""
|
||||
sent_queries_counter.inc("client_one_time_keys")
|
||||
return self.transport_layer.claim_client_keys(destination, content)
|
||||
return self.transport_layer.claim_client_keys(
|
||||
destination, content, timeout
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
@@ -198,10 +189,10 @@ class FederationClient(FederationBase):
|
||||
]
|
||||
|
||||
# FIXME: We should handle signature failures more gracefully.
|
||||
pdus[:] = yield defer.gatherResults(
|
||||
pdus[:] = yield preserve_context_over_deferred(defer.gatherResults(
|
||||
self._check_sigs_and_hashes(pdus),
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)).addErrback(unwrapFirstError)
|
||||
|
||||
defer.returnValue(pdus)
|
||||
|
||||
@@ -218,8 +209,7 @@ class FederationClient(FederationBase):
|
||||
|
||||
Args:
|
||||
destinations (list): Which home servers to query
|
||||
pdu_origin (str): The home server that originally sent the pdu.
|
||||
event_id (str)
|
||||
event_id (str): event to fetch
|
||||
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
|
||||
it's from an arbitrary point in the context as opposed to part
|
||||
of the current block of PDUs. Defaults to `False`
|
||||
@@ -233,68 +223,62 @@ class FederationClient(FederationBase):
|
||||
# TODO: Rate limit the number of times we try and get the same event.
|
||||
|
||||
if self._get_pdu_cache:
|
||||
e = self._get_pdu_cache.get(event_id)
|
||||
if e:
|
||||
defer.returnValue(e)
|
||||
ev = self._get_pdu_cache.get(event_id)
|
||||
if ev:
|
||||
defer.returnValue(ev)
|
||||
|
||||
pdu = None
|
||||
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
|
||||
|
||||
signed_pdu = None
|
||||
for destination in destinations:
|
||||
now = self._clock.time_msec()
|
||||
last_attempt = pdu_attempts.get(destination, 0)
|
||||
if last_attempt + PDU_RETRY_TIME_MS > now:
|
||||
continue
|
||||
|
||||
try:
|
||||
limiter = yield get_retry_limiter(
|
||||
destination,
|
||||
self._clock,
|
||||
self.store,
|
||||
transaction_data = yield self.transport_layer.get_event(
|
||||
destination, event_id, timeout=timeout,
|
||||
)
|
||||
|
||||
with limiter:
|
||||
transaction_data = yield self.transport_layer.get_event(
|
||||
destination, event_id, timeout=timeout,
|
||||
)
|
||||
logger.debug("transaction_data %r", transaction_data)
|
||||
|
||||
logger.debug("transaction_data %r", transaction_data)
|
||||
pdu_list = [
|
||||
self.event_from_pdu_json(p, outlier=outlier)
|
||||
for p in transaction_data["pdus"]
|
||||
]
|
||||
|
||||
pdu_list = [
|
||||
self.event_from_pdu_json(p, outlier=outlier)
|
||||
for p in transaction_data["pdus"]
|
||||
]
|
||||
if pdu_list and pdu_list[0]:
|
||||
pdu = pdu_list[0]
|
||||
|
||||
if pdu_list and pdu_list[0]:
|
||||
pdu = pdu_list[0]
|
||||
# Check signatures are correct.
|
||||
signed_pdu = yield self._check_sigs_and_hashes([pdu])[0]
|
||||
|
||||
# Check signatures are correct.
|
||||
pdu = yield self._check_sigs_and_hashes([pdu])[0]
|
||||
break
|
||||
|
||||
break
|
||||
pdu_attempts[destination] = now
|
||||
|
||||
except SynapseError:
|
||||
except SynapseError as e:
|
||||
logger.info(
|
||||
"Failed to get PDU %s from %s because %s",
|
||||
event_id, destination, e,
|
||||
)
|
||||
continue
|
||||
except CodeMessageException as e:
|
||||
if 400 <= e.code < 500:
|
||||
raise
|
||||
|
||||
logger.info(
|
||||
"Failed to get PDU %s from %s because %s",
|
||||
event_id, destination, e,
|
||||
)
|
||||
continue
|
||||
except NotRetryingDestination as e:
|
||||
logger.info(e.message)
|
||||
continue
|
||||
except Exception as e:
|
||||
pdu_attempts[destination] = now
|
||||
|
||||
logger.info(
|
||||
"Failed to get PDU %s from %s because %s",
|
||||
event_id, destination, e,
|
||||
)
|
||||
continue
|
||||
|
||||
if self._get_pdu_cache is not None and pdu:
|
||||
self._get_pdu_cache[event_id] = pdu
|
||||
if self._get_pdu_cache is not None and signed_pdu:
|
||||
self._get_pdu_cache[event_id] = signed_pdu
|
||||
|
||||
defer.returnValue(pdu)
|
||||
defer.returnValue(signed_pdu)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
@@ -311,6 +295,42 @@ class FederationClient(FederationBase):
|
||||
Deferred: Results in a list of PDUs.
|
||||
"""
|
||||
|
||||
try:
|
||||
# First we try and ask for just the IDs, as that's far quicker if
|
||||
# we have most of the state and auth_chain already.
|
||||
# However, this may 404 if the other side has an old synapse.
|
||||
result = yield self.transport_layer.get_room_state_ids(
|
||||
destination, room_id, event_id=event_id,
|
||||
)
|
||||
|
||||
state_event_ids = result["pdu_ids"]
|
||||
auth_event_ids = result.get("auth_chain_ids", [])
|
||||
|
||||
fetched_events, failed_to_fetch = yield self.get_events(
|
||||
[destination], room_id, set(state_event_ids + auth_event_ids)
|
||||
)
|
||||
|
||||
if failed_to_fetch:
|
||||
logger.warn("Failed to get %r", failed_to_fetch)
|
||||
|
||||
event_map = {
|
||||
ev.event_id: ev for ev in fetched_events
|
||||
}
|
||||
|
||||
pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
|
||||
auth_chain = [
|
||||
event_map[e_id] for e_id in auth_event_ids if e_id in event_map
|
||||
]
|
||||
|
||||
auth_chain.sort(key=lambda e: e.depth)
|
||||
|
||||
defer.returnValue((pdus, auth_chain))
|
||||
except HttpResponseException as e:
|
||||
if e.code == 400 or e.code == 404:
|
||||
logger.info("Failed to use get_room_state_ids API, falling back")
|
||||
else:
|
||||
raise e
|
||||
|
||||
result = yield self.transport_layer.get_room_state(
|
||||
destination, room_id, event_id=event_id,
|
||||
)
|
||||
@@ -324,18 +344,95 @@ class FederationClient(FederationBase):
|
||||
for p in result.get("auth_chain", [])
|
||||
]
|
||||
|
||||
seen_events = yield self.store.get_events([
|
||||
ev.event_id for ev in itertools.chain(pdus, auth_chain)
|
||||
])
|
||||
|
||||
signed_pdus = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, pdus, outlier=True
|
||||
destination,
|
||||
[p for p in pdus if p.event_id not in seen_events],
|
||||
outlier=True
|
||||
)
|
||||
signed_pdus.extend(
|
||||
seen_events[p.event_id] for p in pdus if p.event_id in seen_events
|
||||
)
|
||||
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, auth_chain, outlier=True
|
||||
destination,
|
||||
[p for p in auth_chain if p.event_id not in seen_events],
|
||||
outlier=True
|
||||
)
|
||||
signed_auth.extend(
|
||||
seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
|
||||
)
|
||||
|
||||
signed_auth.sort(key=lambda e: e.depth)
|
||||
|
||||
defer.returnValue((signed_pdus, signed_auth))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_events(self, destinations, room_id, event_ids, return_local=True):
|
||||
"""Fetch events from some remote destinations, checking if we already
|
||||
have them.
|
||||
|
||||
Args:
|
||||
destinations (list)
|
||||
room_id (str)
|
||||
event_ids (list)
|
||||
return_local (bool): Whether to include events we already have in
|
||||
the DB in the returned list of events
|
||||
|
||||
Returns:
|
||||
Deferred: A deferred resolving to a 2-tuple where the first is a list of
|
||||
events and the second is a list of event ids that we failed to fetch.
|
||||
"""
|
||||
if return_local:
|
||||
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
|
||||
signed_events = seen_events.values()
|
||||
else:
|
||||
seen_events = yield self.store.have_events(event_ids)
|
||||
signed_events = []
|
||||
|
||||
failed_to_fetch = set()
|
||||
|
||||
missing_events = set(event_ids)
|
||||
for k in seen_events:
|
||||
missing_events.discard(k)
|
||||
|
||||
if not missing_events:
|
||||
defer.returnValue((signed_events, failed_to_fetch))
|
||||
|
||||
def random_server_list():
|
||||
srvs = list(destinations)
|
||||
random.shuffle(srvs)
|
||||
return srvs
|
||||
|
||||
batch_size = 20
|
||||
missing_events = list(missing_events)
|
||||
for i in xrange(0, len(missing_events), batch_size):
|
||||
batch = set(missing_events[i:i + batch_size])
|
||||
|
||||
deferreds = [
|
||||
preserve_fn(self.get_pdu)(
|
||||
destinations=random_server_list(),
|
||||
event_id=e_id,
|
||||
)
|
||||
for e_id in batch
|
||||
]
|
||||
|
||||
res = yield preserve_context_over_deferred(
|
||||
defer.DeferredList(deferreds, consumeErrors=True)
|
||||
)
|
||||
for success, result in res:
|
||||
if success and result:
|
||||
signed_events.append(result)
|
||||
batch.discard(result.event_id)
|
||||
|
||||
# We removed all events we successfully fetched from `batch`
|
||||
failed_to_fetch.update(batch)
|
||||
|
||||
defer.returnValue((signed_events, failed_to_fetch))
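# The loop above simply walks the missing event ids in fixed-size chunks; a
# standalone sketch of that slicing (the batch size of 20 matches the code,
# the data is made up):
def _batches(items, batch_size=20):
    items = list(items)
    for i in range(0, len(items), batch_size):
        yield items[i:i + batch_size]

assert [len(b) for b in _batches(range(45))] == [20, 20, 5]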
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def get_event_auth(self, destination, room_id, event_id):
|
||||
@@ -407,18 +504,25 @@ class FederationClient(FederationBase):
|
||||
if "prev_state" not in pdu_dict:
|
||||
pdu_dict["prev_state"] = []
|
||||
|
||||
ev = builder.EventBuilder(pdu_dict)
|
||||
|
||||
defer.returnValue(
|
||||
(destination, self.event_from_pdu_json(pdu_dict))
|
||||
(destination, ev)
|
||||
)
|
||||
break
|
||||
except CodeMessageException:
|
||||
raise
|
||||
except CodeMessageException as e:
|
||||
if not 500 <= e.code < 600:
|
||||
raise
|
||||
else:
|
||||
logger.warn(
|
||||
"Failed to make_%s via %s: %s",
|
||||
membership, destination, e.message
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warn(
|
||||
"Failed to make_%s via %s: %s",
|
||||
membership, destination, e.message
|
||||
)
|
||||
raise
|
||||
|
||||
raise RuntimeError("Failed to send to any server.")
|
||||
|
||||
@@ -490,8 +594,14 @@ class FederationClient(FederationBase):
|
||||
"auth_chain": signed_auth,
|
||||
"origin": destination,
|
||||
})
|
||||
except CodeMessageException:
|
||||
raise
|
||||
except CodeMessageException as e:
|
||||
if not 500 <= e.code < 600:
|
||||
raise
|
||||
else:
|
||||
logger.exception(
|
||||
"Failed to send_join via %s: %s",
|
||||
destination, e.message
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"Failed to send_join via %s: %s",
|
||||
@@ -550,6 +660,18 @@ class FederationClient(FederationBase):
|
||||
|
||||
raise RuntimeError("Failed to send to any server.")
|
||||
|
||||
def get_public_rooms(self, destination, limit=None, since_token=None,
|
||||
search_filter=None, include_all_networks=False,
|
||||
third_party_instance_id=None):
|
||||
if destination == self.server_name:
|
||||
return
|
||||
|
||||
return self.transport_layer.get_public_rooms(
|
||||
destination, limit, since_token, search_filter,
|
||||
include_all_networks=include_all_networks,
|
||||
third_party_instance_id=third_party_instance_id,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_auth(self, destination, room_id, event_id, local_auth):
|
||||
"""
|
||||
@@ -592,7 +714,7 @@ class FederationClient(FederationBase):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_missing_events(self, destination, room_id, earliest_events_ids,
|
||||
latest_events, limit, min_depth):
|
||||
latest_events, limit, min_depth, timeout):
|
||||
"""Tries to fetch events we are missing. This is called when we receive
|
||||
an event without having received all of its ancestors.
|
||||
|
||||
@@ -606,6 +728,7 @@ class FederationClient(FederationBase):
|
||||
have all previous events for.
|
||||
limit (int): Maximum number of events to return.
|
||||
min_depth (int): Minimum depth of events to return.
|
||||
timeout (int): Max time to wait in ms
|
||||
"""
|
||||
try:
|
||||
content = yield self.transport_layer.get_missing_events(
|
||||
@@ -615,6 +738,7 @@ class FederationClient(FederationBase):
|
||||
latest_events=[e.event_id for e in latest_events],
|
||||
limit=limit,
|
||||
min_depth=min_depth,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
events = [
|
||||
@@ -625,8 +749,6 @@ class FederationClient(FederationBase):
|
||||
signed_events = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, events, outlier=False
|
||||
)
|
||||
|
||||
have_gotten_all_from_destination = True
|
||||
except HttpResponseException as e:
|
||||
if not e.code == 400:
|
||||
raise
|
||||
@@ -634,69 +756,6 @@ class FederationClient(FederationBase):
|
||||
# We are probably hitting an old server that doesn't support
|
||||
# get_missing_events
|
||||
signed_events = []
|
||||
have_gotten_all_from_destination = False
|
||||
|
||||
if len(signed_events) >= limit:
|
||||
defer.returnValue(signed_events)
|
||||
|
||||
servers = yield self.store.get_joined_hosts_for_room(room_id)
|
||||
|
||||
servers = set(servers)
|
||||
servers.discard(self.server_name)
|
||||
|
||||
failed_to_fetch = set()
|
||||
|
||||
while len(signed_events) < limit:
|
||||
# Are we missing any?
|
||||
|
||||
seen_events = set(earliest_events_ids)
|
||||
seen_events.update(e.event_id for e in signed_events if e)
|
||||
|
||||
missing_events = {}
|
||||
for e in itertools.chain(latest_events, signed_events):
|
||||
if e.depth > min_depth:
|
||||
missing_events.update({
|
||||
e_id: e.depth for e_id, _ in e.prev_events
|
||||
if e_id not in seen_events
|
||||
and e_id not in failed_to_fetch
|
||||
})
|
||||
|
||||
if not missing_events:
|
||||
break
|
||||
|
||||
have_seen = yield self.store.have_events(missing_events)
|
||||
|
||||
for k in have_seen:
|
||||
missing_events.pop(k, None)
|
||||
|
||||
if not missing_events:
|
||||
break
|
||||
|
||||
# Okay, we haven't gotten everything yet. Lets get them.
|
||||
ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])
|
||||
|
||||
if have_gotten_all_from_destination:
|
||||
servers.discard(destination)
|
||||
|
||||
def random_server_list():
|
||||
srvs = list(servers)
|
||||
random.shuffle(srvs)
|
||||
return srvs
|
||||
|
||||
deferreds = [
|
||||
self.get_pdu(
|
||||
destinations=random_server_list(),
|
||||
event_id=e_id,
|
||||
)
|
||||
for e_id, depth in ordered_missing[:limit - len(signed_events)]
|
||||
]
|
||||
|
||||
res = yield defer.DeferredList(deferreds, consumeErrors=True)
|
||||
for (result, val), (e_id, _) in zip(res, ordered_missing):
|
||||
if result and val:
|
||||
signed_events.append(val)
|
||||
else:
|
||||
failed_to_fetch.add(e_id)
|
||||
|
||||
defer.returnValue(signed_events)
|
||||
|
||||
|
||||
@@ -19,11 +19,14 @@ from twisted.internet import defer
|
||||
from .federation_base import FederationBase
|
||||
from .units import Transaction, Edu
|
||||
|
||||
from synapse.util.async import Linearizer
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.util.caches.response_cache import ResponseCache
|
||||
from synapse.events import FrozenEvent
|
||||
from synapse.types import get_domain_from_id
|
||||
import synapse.metrics
|
||||
|
||||
from synapse.api.errors import FederationError, SynapseError
|
||||
from synapse.api.errors import AuthError, FederationError, SynapseError
|
||||
|
||||
from synapse.crypto.event_signing import compute_event_signature
|
||||
|
||||
@@ -44,6 +47,17 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[
|
||||
|
||||
|
||||
class FederationServer(FederationBase):
|
||||
def __init__(self, hs):
|
||||
super(FederationServer, self).__init__(hs)
|
||||
|
||||
self.auth = hs.get_auth()
|
||||
|
||||
self._server_linearizer = Linearizer("fed_server")
|
||||
|
||||
# We cache responses to state queries, as they take a while and often
|
||||
# come in waves.
|
||||
self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)
|
||||
|
||||
def set_handler(self, handler):
|
||||
"""Sets the handler that the replication layer will use to communicate
|
||||
receipt of new PDUs from other home servers. The required methods are
|
||||
@@ -83,11 +97,14 @@ class FederationServer(FederationBase):
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_backfill_request(self, origin, room_id, versions, limit):
|
||||
pdus = yield self.handler.on_backfill_request(
|
||||
origin, room_id, versions, limit
|
||||
)
|
||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||
pdus = yield self.handler.on_backfill_request(
|
||||
origin, room_id, versions, limit
|
||||
)
|
||||
|
||||
defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
|
||||
res = self._transaction_from_pdus(pdus).get_dict()
|
||||
|
||||
defer.returnValue((200, res))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
@@ -115,7 +132,7 @@ class FederationServer(FederationBase):
|
||||
|
||||
if response:
|
||||
logger.debug(
|
||||
"[%s] We've already responed to this request",
|
||||
"[%s] We've already responded to this request",
|
||||
transaction.transaction_id
|
||||
)
|
||||
defer.returnValue(response)
|
||||
@@ -126,8 +143,28 @@ class FederationServer(FederationBase):
|
||||
results = []
|
||||
|
||||
for pdu in pdu_list:
|
||||
# check that it's actually being sent from a valid destination to
|
||||
# workaround bug #1753 in 0.18.5 and 0.18.6
|
||||
if transaction.origin != get_domain_from_id(pdu.event_id):
|
||||
if not (
|
||||
pdu.type == 'm.room.member' and
|
||||
pdu.content and
|
||||
pdu.content.get("membership", None) == 'join' and
|
||||
self.hs.is_mine_id(pdu.state_key)
|
||||
):
|
||||
logger.info(
|
||||
"Discarding PDU %s from invalid origin %s",
|
||||
pdu.event_id, transaction.origin
|
||||
)
|
||||
continue
|
||||
else:
|
||||
logger.info(
|
||||
"Accepting join PDU %s from %s",
|
||||
pdu.event_id, transaction.origin
|
||||
)
|
||||
|
||||
try:
|
||||
yield self._handle_new_pdu(transaction.origin, pdu)
|
||||
yield self._handle_received_pdu(transaction.origin, pdu)
|
||||
results.append({})
|
||||
except FederationError as e:
|
||||
self.send_failure(e, transaction.origin)
|
||||
@@ -171,22 +208,64 @@ class FederationServer(FederationBase):
|
||||
except SynapseError as e:
|
||||
logger.info("Failed to handle edu %r: %r", edu_type, e)
|
||||
except Exception as e:
|
||||
logger.exception("Failed to handle edu %r", edu_type, e)
|
||||
logger.exception("Failed to handle edu %r", edu_type)
|
||||
else:
|
||||
logger.warn("Received EDU of type %s with no handler", edu_type)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_context_state_request(self, origin, room_id, event_id):
|
||||
if event_id:
|
||||
pdus = yield self.handler.get_state_for_pdu(
|
||||
origin, room_id, event_id,
|
||||
)
|
||||
auth_chain = yield self.store.get_auth_chain(
|
||||
[pdu.event_id for pdu in pdus]
|
||||
)
|
||||
if not event_id:
|
||||
raise NotImplementedError("Specify an event")
|
||||
|
||||
for event in auth_chain:
|
||||
in_room = yield self.auth.check_host_in_room(room_id, origin)
|
||||
if not in_room:
|
||||
raise AuthError(403, "Host not in room.")
|
||||
|
||||
result = self._state_resp_cache.get((room_id, event_id))
|
||||
if not result:
|
||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||
resp = yield self._state_resp_cache.set(
|
||||
(room_id, event_id),
|
||||
self._on_context_state_request_compute(room_id, event_id)
|
||||
)
|
||||
else:
|
||||
resp = yield result
|
||||
|
||||
defer.returnValue((200, resp))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_state_ids_request(self, origin, room_id, event_id):
|
||||
if not event_id:
|
||||
raise NotImplementedError("Specify an event")
|
||||
|
||||
in_room = yield self.auth.check_host_in_room(room_id, origin)
|
||||
if not in_room:
|
||||
raise AuthError(403, "Host not in room.")
|
||||
|
||||
state_ids = yield self.handler.get_state_ids_for_pdu(
|
||||
room_id, event_id,
|
||||
)
|
||||
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
|
||||
|
||||
defer.returnValue((200, {
|
||||
"pdu_ids": state_ids,
|
||||
"auth_chain_ids": auth_chain_ids,
|
||||
}))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _on_context_state_request_compute(self, room_id, event_id):
|
||||
pdus = yield self.handler.get_state_for_pdu(
|
||||
room_id, event_id,
|
||||
)
|
||||
auth_chain = yield self.store.get_auth_chain(
|
||||
[pdu.event_id for pdu in pdus]
|
||||
)
|
||||
|
||||
for event in auth_chain:
|
||||
# We sign these again because there was a bug where we
|
||||
# incorrectly signed things the first time round
|
||||
if self.hs.is_mine_id(event.event_id):
|
||||
event.signatures.update(
|
||||
compute_event_signature(
|
||||
event,
|
||||
@@ -194,13 +273,11 @@ class FederationServer(FederationBase):
|
||||
self.hs.config.signing_key[0]
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise NotImplementedError("Specify an event")
|
||||
|
||||
defer.returnValue((200, {
|
||||
defer.returnValue({
|
||||
"pdus": [pdu.get_pdu_json() for pdu in pdus],
|
||||
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
|
||||
}))
|
||||
})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
@@ -274,14 +351,16 @@ class FederationServer(FederationBase):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_event_auth(self, origin, room_id, event_id):
|
||||
time_now = self._clock.time_msec()
|
||||
auth_pdus = yield self.handler.on_event_auth(event_id)
|
||||
defer.returnValue((200, {
|
||||
"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
|
||||
}))
|
||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||
time_now = self._clock.time_msec()
|
||||
auth_pdus = yield self.handler.on_event_auth(event_id)
|
||||
res = {
|
||||
"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
|
||||
}
|
||||
defer.returnValue((200, res))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_query_auth_request(self, origin, content, event_id):
|
||||
def on_query_auth_request(self, origin, content, room_id, event_id):
|
||||
"""
|
||||
Content is a dict with keys::
|
||||
auth_chain (list): A list of events that give the auth chain.
|
||||
@@ -300,58 +379,44 @@ class FederationServer(FederationBase):
|
||||
Returns:
|
||||
Deferred: Results in `dict` with the same format as `content`
|
||||
"""
|
||||
auth_chain = [
|
||||
self.event_from_pdu_json(e)
|
||||
for e in content["auth_chain"]
|
||||
]
|
||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||
auth_chain = [
|
||||
self.event_from_pdu_json(e)
|
||||
for e in content["auth_chain"]
|
||||
]
|
||||
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
origin, auth_chain, outlier=True
|
||||
)
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
origin, auth_chain, outlier=True
|
||||
)
|
||||
|
||||
ret = yield self.handler.on_query_auth(
|
||||
origin,
|
||||
event_id,
|
||||
signed_auth,
|
||||
content.get("rejects", []),
|
||||
content.get("missing", []),
|
||||
)
|
||||
ret = yield self.handler.on_query_auth(
|
||||
origin,
|
||||
event_id,
|
||||
signed_auth,
|
||||
content.get("rejects", []),
|
||||
content.get("missing", []),
|
||||
)
|
||||
|
||||
time_now = self._clock.time_msec()
|
||||
send_content = {
|
||||
"auth_chain": [
|
||||
e.get_pdu_json(time_now)
|
||||
for e in ret["auth_chain"]
|
||||
],
|
||||
"rejects": ret.get("rejects", []),
|
||||
"missing": ret.get("missing", []),
|
||||
}
|
||||
time_now = self._clock.time_msec()
|
||||
send_content = {
|
||||
"auth_chain": [
|
||||
e.get_pdu_json(time_now)
|
||||
for e in ret["auth_chain"]
|
||||
],
|
||||
"rejects": ret.get("rejects", []),
|
||||
"missing": ret.get("missing", []),
|
||||
}
|
||||
|
||||
defer.returnValue(
|
||||
(200, send_content)
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def on_query_client_keys(self, origin, content):
|
||||
query = []
|
||||
for user_id, device_ids in content.get("device_keys", {}).items():
|
||||
if not device_ids:
|
||||
query.append((user_id, None))
|
||||
else:
|
||||
for device_id in device_ids:
|
||||
query.append((user_id, device_id))
|
||||
return self.on_query_request("client_keys", content)
|
||||
|
||||
results = yield self.store.get_e2e_device_keys(query)
|
||||
|
||||
json_result = {}
|
||||
for user_id, device_keys in results.items():
|
||||
for device_id, json_bytes in device_keys.items():
|
||||
json_result.setdefault(user_id, {})[device_id] = json.loads(
|
||||
json_bytes
|
||||
)
|
||||
|
||||
defer.returnValue({"device_keys": json_result})
|
||||
def on_query_user_devices(self, origin, user_id):
|
||||
return self.on_query_request("user_devices", user_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
@@ -377,16 +442,35 @@ class FederationServer(FederationBase):
|
||||
@log_function
|
||||
def on_get_missing_events(self, origin, room_id, earliest_events,
|
||||
latest_events, limit, min_depth):
|
||||
missing_events = yield self.handler.on_get_missing_events(
|
||||
origin, room_id, earliest_events, latest_events, limit, min_depth
|
||||
)
|
||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||
logger.info(
|
||||
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
|
||||
" limit: %d, min_depth: %d",
|
||||
earliest_events, latest_events, limit, min_depth
|
||||
)
|
||||
|
||||
time_now = self._clock.time_msec()
|
||||
missing_events = yield self.handler.on_get_missing_events(
|
||||
origin, room_id, earliest_events, latest_events, limit, min_depth
|
||||
)
|
||||
|
||||
if len(missing_events) < 5:
|
||||
logger.info(
|
||||
"Returning %d events: %r", len(missing_events), missing_events
|
||||
)
|
||||
else:
|
||||
logger.info("Returning %d events", len(missing_events))
|
||||
|
||||
time_now = self._clock.time_msec()
|
||||
|
||||
defer.returnValue({
|
||||
"events": [ev.get_pdu_json(time_now) for ev in missing_events],
|
||||
})
|
||||
|
||||
@log_function
|
||||
def on_openid_userinfo(self, token):
|
||||
ts_now_ms = self._clock.time_msec()
|
||||
return self.store.get_user_id_for_open_id_token(token, ts_now_ms)
|
||||
|
||||
@log_function
|
||||
def _get_persisted_pdu(self, origin, event_id, do_auth=True):
|
||||
""" Get a PDU from the database with given origin and id.
|
||||
@@ -412,26 +496,16 @@ class FederationServer(FederationBase):
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def _handle_new_pdu(self, origin, pdu, get_missing=True):
|
||||
# We reprocess pdus when we have seen them only as outliers
|
||||
existing = yield self._get_persisted_pdu(
|
||||
origin, pdu.event_id, do_auth=False
|
||||
)
|
||||
def _handle_received_pdu(self, origin, pdu):
|
||||
""" Process a PDU received in a federation /send/ transaction.
|
||||
|
||||
# FIXME: Currently we fetch an event again when we already have it
|
||||
# if it has been marked as an outlier.
|
||||
|
||||
already_seen = (
|
||||
existing and (
|
||||
not existing.internal_metadata.is_outlier()
|
||||
or pdu.internal_metadata.is_outlier()
|
||||
)
|
||||
)
|
||||
if already_seen:
|
||||
logger.debug("Already seen pdu %s", pdu.event_id)
|
||||
return
|
||||
Args:
|
||||
origin (str): server which sent the pdu
|
||||
pdu (FrozenEvent): received pdu
|
||||
|
||||
Returns (Deferred): completes with None
|
||||
Raises: FederationError if the signatures / hash do not match
|
||||
"""
|
||||
# Check signature.
|
||||
try:
|
||||
pdu = yield self._check_sigs_and_hash(pdu)
|
||||
@@ -443,97 +517,7 @@ class FederationServer(FederationBase):
|
||||
affected=pdu.event_id,
|
||||
)
|
||||
|
||||
state = None
|
||||
|
||||
auth_chain = []
|
||||
|
||||
have_seen = yield self.store.have_events(
|
||||
[ev for ev, _ in pdu.prev_events]
|
||||
)
|
||||
|
||||
fetch_state = False
|
||||
|
||||
# Get missing pdus if necessary.
|
||||
if not pdu.internal_metadata.is_outlier():
|
||||
# We only backfill backwards to the min depth.
|
||||
min_depth = yield self.handler.get_min_depth_for_context(
|
||||
pdu.room_id
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"_handle_new_pdu min_depth for %s: %d",
|
||||
pdu.room_id, min_depth
|
||||
)
|
||||
|
||||
prevs = {e_id for e_id, _ in pdu.prev_events}
|
||||
seen = set(have_seen.keys())
|
||||
|
||||
if min_depth and pdu.depth < min_depth:
|
||||
# This is so that we don't notify the user about this
|
||||
# message, to work around the fact that some events will
|
||||
# reference really really old events we really don't want to
|
||||
# send to the clients.
|
||||
pdu.internal_metadata.outlier = True
|
||||
elif min_depth and pdu.depth > min_depth:
|
||||
if get_missing and prevs - seen:
|
||||
latest = yield self.store.get_latest_event_ids_in_room(
|
||||
pdu.room_id
|
||||
)
|
||||
|
||||
# We add the prev events that we have seen to the latest
|
||||
# list to ensure the remote server doesn't give them to us
|
||||
latest = set(latest)
|
||||
latest |= seen
|
||||
|
||||
missing_events = yield self.get_missing_events(
|
||||
origin,
|
||||
pdu.room_id,
|
||||
earliest_events_ids=list(latest),
|
||||
latest_events=[pdu],
|
||||
limit=10,
|
||||
min_depth=min_depth,
|
||||
)
|
||||
|
||||
# We want to sort these by depth so we process them and
|
||||
# tell clients about them in order.
|
||||
missing_events.sort(key=lambda x: x.depth)
|
||||
|
||||
for e in missing_events:
|
||||
yield self._handle_new_pdu(
|
||||
origin,
|
||||
e,
|
||||
get_missing=False
|
||||
)
|
||||
|
||||
have_seen = yield self.store.have_events(
|
||||
[ev for ev, _ in pdu.prev_events]
|
||||
)
|
||||
|
||||
prevs = {e_id for e_id, _ in pdu.prev_events}
|
||||
seen = set(have_seen.keys())
|
||||
if prevs - seen:
|
||||
fetch_state = True
|
||||
|
||||
if fetch_state:
|
||||
# We need to get the state at this event, since we haven't
|
||||
# processed all the prev events.
|
||||
logger.debug(
|
||||
"_handle_new_pdu getting state for %s",
|
||||
pdu.room_id
|
||||
)
|
||||
try:
|
||||
state, auth_chain = yield self.get_state_for_room(
|
||||
origin, pdu.room_id, pdu.event_id,
|
||||
)
|
||||
except:
|
||||
logger.warn("Failed to get state for event: %s", pdu.event_id)
|
||||
|
||||
yield self.handler.on_receive_pdu(
|
||||
origin,
|
||||
pdu,
|
||||
state=state,
|
||||
auth_chain=auth_chain,
|
||||
)
|
||||
yield self.handler.on_receive_pdu(origin, pdu, get_missing=True)
|
||||
|
||||
def __str__(self):
|
||||
return "<ReplicationLayer(%s)>" % self.server_name
|
||||
|
||||
@@ -20,8 +20,6 @@ a given transport.
|
||||
from .federation_client import FederationClient
|
||||
from .federation_server import FederationServer
|
||||
|
||||
from .transaction_queue import TransactionQueue
|
||||
|
||||
from .persistence import TransactionActions
|
||||
|
||||
import logging
|
||||
@@ -66,11 +64,10 @@ class ReplicationLayer(FederationClient, FederationServer):
|
||||
self._clock = hs.get_clock()
|
||||
|
||||
self.transaction_actions = TransactionActions(self.store)
|
||||
self._transaction_queue = TransactionQueue(hs, transport_layer)
|
||||
|
||||
self._order = 0
|
||||
|
||||
self.hs = hs
|
||||
|
||||
super(ReplicationLayer, self).__init__(hs)
|
||||
|
||||
def __str__(self):
|
||||
return "<ReplicationLayer(%s)>" % self.server_name
|
||||
|
||||
315
synapse/federation/send_queue.py
Normal file
@@ -0,0 +1,315 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""A federation sender that forwards things to be sent across replication to
|
||||
a worker process.
|
||||
|
||||
It assumes there is a single worker process feeding off of it.
|
||||
|
||||
Each row in the replication stream consists of a type and some json, where the
|
||||
types indicate whether they are presence, or edus, etc.
|
||||
|
||||
Ephemeral or non-event data are queued up in-memory. When the worker requests
|
||||
updates since a particular point, all in-memory data from before that point is
|
||||
dropped. We also expire things in the queue after 5 minutes, to ensure that a
|
||||
dead worker doesn't cause the queues to grow limitlessly.
|
||||
|
||||
Events are replicated via a separate events stream.
|
||||
"""
|
||||
|
||||
from .units import Edu
|
||||
|
||||
from synapse.util.metrics import Measure
|
||||
import synapse.metrics
|
||||
|
||||
from blist import sorteddict
|
||||
import ujson
|
||||
|
||||
|
||||
metrics = synapse.metrics.get_metrics_for(__name__)
|
||||
|
||||
|
||||
PRESENCE_TYPE = "p"
|
||||
KEYED_EDU_TYPE = "k"
|
||||
EDU_TYPE = "e"
|
||||
FAILURE_TYPE = "f"
|
||||
DEVICE_MESSAGE_TYPE = "d"
|
||||
|
||||
|
||||
class FederationRemoteSendQueue(object):
|
||||
"""A drop in replacement for TransactionQueue"""
|
||||
|
||||
def __init__(self, hs):
|
||||
self.server_name = hs.hostname
|
||||
self.clock = hs.get_clock()
|
||||
self.notifier = hs.get_notifier()
|
||||
|
||||
self.presence_map = {}
|
||||
self.presence_changed = sorteddict()
|
||||
|
||||
self.keyed_edu = {}
|
||||
self.keyed_edu_changed = sorteddict()
|
||||
|
||||
self.edus = sorteddict()
|
||||
|
||||
self.failures = sorteddict()
|
||||
|
||||
self.device_messages = sorteddict()
|
||||
|
||||
self.pos = 1
|
||||
self.pos_time = sorteddict()
|
||||
|
||||
# EVERYTHING IS SAD. In particular, python only makes new scopes when
|
||||
# we make a new function, so we need to make a new function so the inner
|
||||
# lambda binds to the queue rather than to the name of the queue which
|
||||
# changes. ARGH.
|
||||
def register(name, queue):
|
||||
metrics.register_callback(
|
||||
queue_name + "_size",
|
||||
lambda: len(queue),
|
||||
)
|
||||
|
||||
for queue_name in [
|
||||
"presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
|
||||
"edus", "failures", "device_messages", "pos_time",
|
||||
]:
|
||||
register(queue_name, getattr(self, queue_name))
|
||||
|
||||
self.clock.looping_call(self._clear_queue, 30 * 1000)
|
||||
|
||||
def _next_pos(self):
|
||||
pos = self.pos
|
||||
self.pos += 1
|
||||
self.pos_time[self.clock.time_msec()] = pos
|
||||
return pos
|
||||
|
||||
def _clear_queue(self):
|
||||
"""Clear the queues for anything older than N minutes"""
|
||||
|
||||
FIVE_MINUTES_AGO = 5 * 60 * 1000
|
||||
now = self.clock.time_msec()
|
||||
|
||||
keys = self.pos_time.keys()
|
||||
time = keys.bisect_left(now - FIVE_MINUTES_AGO)
|
||||
if not keys[:time]:
|
||||
return
|
||||
|
||||
position_to_delete = max(keys[:time])
|
||||
for key in keys[:time]:
|
||||
del self.pos_time[key]
|
||||
|
||||
self._clear_queue_before_pos(position_to_delete)
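# Sketch of the expiry pattern used above, assuming blist.sorteddict: keys()
# returns a sorted view supporting bisect_left, so everything older than the
# cutoff can be found and dropped in one slice, e.g.
#
#     d = sorteddict({1000: "a", 2000: "b", 3000: "c"})
#     keys = d.keys()
#     i = keys.bisect_left(2500)      # index of the first key >= 2500
#     for k in list(keys[:i]):        # the strictly older entries
#         del d[k]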
|
||||
|
||||
def _clear_queue_before_pos(self, position_to_delete):
|
||||
"""Clear all the queues from before a given position"""
|
||||
with Measure(self.clock, "send_queue._clear"):
|
||||
# Delete things out of presence maps
|
||||
keys = self.presence_changed.keys()
|
||||
i = keys.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.presence_changed[key]
|
||||
|
||||
user_ids = set(
|
||||
user_id for uids in self.presence_changed.values() for _, user_id in uids
|
||||
)
|
||||
|
||||
to_del = [
|
||||
user_id for user_id in self.presence_map if user_id not in user_ids
|
||||
]
|
||||
for user_id in to_del:
|
||||
del self.presence_map[user_id]
|
||||
|
||||
# Delete things out of keyed edus
|
||||
keys = self.keyed_edu_changed.keys()
|
||||
i = keys.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.keyed_edu_changed[key]
|
||||
|
||||
live_keys = set()
|
||||
for edu_key in self.keyed_edu_changed.values():
|
||||
live_keys.add(edu_key)
|
||||
|
||||
to_del = [edu_key for edu_key in self.keyed_edu if edu_key not in live_keys]
|
||||
for edu_key in to_del:
|
||||
del self.keyed_edu[edu_key]
|
||||
|
||||
# Delete things out of edu map
|
||||
keys = self.edus.keys()
|
||||
i = keys.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.edus[key]
|
||||
|
||||
# Delete things out of failure map
|
||||
keys = self.failures.keys()
|
||||
i = keys.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.failures[key]
|
||||
|
||||
# Delete things out of device map
|
||||
keys = self.device_messages.keys()
|
||||
i = keys.bisect_left(position_to_delete)
|
||||
for key in keys[:i]:
|
||||
del self.device_messages[key]
|
||||
|
||||
def notify_new_events(self, current_id):
|
||||
"""As per TransactionQueue"""
|
||||
# We don't need to replicate this as it gets sent down a different
|
||||
# stream.
|
||||
pass
|
||||
|
||||
def send_edu(self, destination, edu_type, content, key=None):
|
||||
"""As per TransactionQueue"""
|
||||
pos = self._next_pos()
|
||||
|
||||
edu = Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type=edu_type,
|
||||
content=content,
|
||||
)
|
||||
|
||||
if key:
|
||||
assert isinstance(key, tuple)
|
||||
self.keyed_edu[(destination, key)] = edu
|
||||
self.keyed_edu_changed[pos] = (destination, key)
|
||||
else:
|
||||
self.edus[pos] = edu
|
||||
|
||||
self.notifier.on_new_replication_data()
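# Sketch of the keyed-EDU behaviour (hypothetical caller, key shape assumed):
# passing a key makes later EDUs with the same (destination, key) overwrite
# earlier ones that the worker has not yet fetched, so rapid typing updates
# collapse to the most recent state:
#
#     queue.send_edu(
#         "remote.example.org", "m.typing",
#         {"room_id": room_id, "user_id": user_id, "typing": True},
#         key=(room_id,),
#     )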
|
||||
|
||||
def send_presence(self, destination, states):
|
||||
"""As per TransactionQueue"""
|
||||
pos = self._next_pos()
|
||||
|
||||
self.presence_map.update({
|
||||
state.user_id: state
|
||||
for state in states
|
||||
})
|
||||
|
||||
self.presence_changed[pos] = [
|
||||
(destination, state.user_id) for state in states
|
||||
]
|
||||
|
||||
self.notifier.on_new_replication_data()
|
||||
|
||||
def send_failure(self, failure, destination):
|
||||
"""As per TransactionQueue"""
|
||||
pos = self._next_pos()
|
||||
|
||||
self.failures[pos] = (destination, str(failure))
|
||||
self.notifier.on_new_replication_data()
|
||||
|
||||
def send_device_messages(self, destination):
|
||||
"""As per TransactionQueue"""
|
||||
pos = self._next_pos()
|
||||
self.device_messages[pos] = destination
|
||||
self.notifier.on_new_replication_data()
|
||||
|
||||
def get_current_token(self):
|
||||
return self.pos - 1
|
||||
|
||||
def federation_ack(self, token):
|
||||
self._clear_queue_before_pos(token)
|
||||
|
||||
def get_replication_rows(self, from_token, to_token, limit, federation_ack=None):
|
||||
"""Get rows to be sent over federation between the two tokens
|
||||
|
||||
Args:
|
||||
from_token (int)
|
||||
to_token (int)
|
||||
limit (int)
|
||||
federation_ack (int): Optional. The highest position the worker has
|
||||
explicitly acknowledged it has handled. Allows us to drop
|
||||
data from before that point.
|
||||
"""
|
||||
# TODO: Handle limit.
|
||||
|
||||
# To handle restarts where we wrap around
|
||||
if from_token > self.pos:
|
||||
from_token = -1
|
||||
|
||||
rows = []
|
||||
|
||||
# There should be only one reader, so let's delete everything it has
|
||||
# acknowledged it has seen.
|
||||
if federation_ack:
|
||||
self._clear_queue_before_pos(federation_ack)
|
||||
|
||||
# Fetch changed presence
|
||||
keys = self.presence_changed.keys()
|
||||
i = keys.bisect_right(from_token)
|
||||
j = keys.bisect_right(to_token) + 1
|
||||
dest_user_ids = set(
|
||||
(pos, dest_user_id)
|
||||
for pos in keys[i:j]
|
||||
for dest_user_id in self.presence_changed[pos]
|
||||
)
|
||||
|
||||
for (key, (dest, user_id)) in dest_user_ids:
|
||||
rows.append((key, PRESENCE_TYPE, ujson.dumps({
|
||||
"destination": dest,
|
||||
"state": self.presence_map[user_id].as_dict(),
|
||||
})))
|
||||
|
||||
# Fetch changes keyed edus
|
||||
keys = self.keyed_edu_changed.keys()
|
||||
i = keys.bisect_right(from_token)
|
||||
j = keys.bisect_right(to_token) + 1
|
||||
keyed_edus = set((k, self.keyed_edu_changed[k]) for k in keys[i:j])
|
||||
|
||||
for (pos, (destination, edu_key)) in keyed_edus:
|
||||
rows.append(
|
||||
(pos, KEYED_EDU_TYPE, ujson.dumps({
|
||||
"key": edu_key,
|
||||
"edu": self.keyed_edu[(destination, edu_key)].get_internal_dict(),
|
||||
}))
|
||||
)
|
||||
|
||||
# Fetch changed edus
|
||||
keys = self.edus.keys()
|
||||
i = keys.bisect_right(from_token)
|
||||
j = keys.bisect_right(to_token) + 1
|
||||
edus = set((k, self.edus[k]) for k in keys[i:j])
|
||||
|
||||
for (pos, edu) in edus:
|
||||
rows.append((pos, EDU_TYPE, ujson.dumps(edu.get_internal_dict())))
|
||||
|
||||
# Fetch changed failures
|
||||
keys = self.failures.keys()
|
||||
i = keys.bisect_right(from_token)
|
||||
j = keys.bisect_right(to_token) + 1
|
||||
failures = set((k, self.failures[k]) for k in keys[i:j])
|
||||
|
||||
for (pos, (destination, failure)) in failures:
|
||||
rows.append((pos, FAILURE_TYPE, ujson.dumps({
|
||||
"destination": destination,
|
||||
"failure": failure,
|
||||
})))
|
||||
|
||||
# Fetch changed device messages
|
||||
keys = self.device_messages.keys()
|
||||
i = keys.bisect_right(from_token)
|
||||
j = keys.bisect_right(to_token) + 1
|
||||
device_messages = set((k, self.device_messages[k]) for k in keys[i:j])
|
||||
|
||||
for (pos, destination) in device_messages:
|
||||
rows.append((pos, DEVICE_MESSAGE_TYPE, ujson.dumps({
|
||||
"destination": destination,
|
||||
})))
|
||||
|
||||
# Sort rows based on pos
|
||||
rows.sort()
|
||||
|
||||
return rows
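# Hypothetical worker-side usage (not part of this module): pull everything
# since the last processed token and decode each payload by its type marker:
#
#     rows = queue.get_replication_rows(last_token, queue.get_current_token(),
#                                       limit=100, federation_ack=last_ack)
#     for pos, typ, payload in rows:
#         data = ujson.loads(payload)
#         ...  # dispatch on typ (PRESENCE_TYPE, KEYED_EDU_TYPE, EDU_TYPE, etc.)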
|
||||
@@ -12,19 +12,20 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
from .persistence import TransactionActions
|
||||
from .units import Transaction
|
||||
from .units import Transaction, Edu
|
||||
|
||||
from synapse.api.errors import HttpResponseException
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.util.logcontext import PreserveLoggingContext
|
||||
from synapse.util.retryutils import (
|
||||
get_retry_limiter, NotRetryingDestination,
|
||||
)
|
||||
from synapse.util.async import run_on_reactor
|
||||
from synapse.util.logcontext import preserve_context_over_fn
|
||||
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
|
||||
from synapse.util.metrics import measure_func
|
||||
from synapse.types import get_domain_from_id
|
||||
from synapse.handlers.presence import format_user_presence_state
|
||||
import synapse.metrics
|
||||
|
||||
import logging
|
||||
@@ -34,6 +35,12 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
metrics = synapse.metrics.get_metrics_for(__name__)
|
||||
|
||||
client_metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
|
||||
sent_pdus_destination_dist = client_metrics.register_distribution(
|
||||
"sent_pdu_destinations"
|
||||
)
|
||||
sent_edus_counter = client_metrics.register_counter("sent_edus")
|
||||
|
||||
|
||||
class TransactionQueue(object):
|
||||
"""This class makes sure we only have one transaction in flight at
|
||||
@@ -42,15 +49,17 @@ class TransactionQueue(object):
|
||||
It batches pending PDUs into single transactions.
|
||||
"""
|
||||
|
||||
def __init__(self, hs, transport_layer):
|
||||
def __init__(self, hs):
|
||||
self.server_name = hs.hostname
|
||||
|
||||
self.store = hs.get_datastore()
|
||||
self.state = hs.get_state_handler()
|
||||
self.transaction_actions = TransactionActions(self.store)
|
||||
|
||||
self.transport_layer = transport_layer
|
||||
self.transport_layer = hs.get_federation_transport_client()
|
||||
|
||||
self._clock = hs.get_clock()
|
||||
self.clock = hs.get_clock()
|
||||
self.is_mine_id = hs.is_mine_id
|
||||
|
||||
# Is a mapping from destinations -> deferreds. Used to keep track
|
||||
# of which destinations have transactions in flight and when they are
|
||||
@@ -68,20 +77,41 @@ class TransactionQueue(object):
|
||||
# destination -> list of tuple(edu, deferred)
|
||||
self.pending_edus_by_dest = edus = {}
|
||||
|
||||
# Presence needs to be separate as we send single aggregate EDUs
|
||||
self.pending_presence_by_dest = presence = {}
|
||||
self.pending_edus_keyed_by_dest = edus_keyed = {}
|
||||
|
||||
metrics.register_callback(
|
||||
"pending_pdus",
|
||||
lambda: sum(map(len, pdus.values())),
|
||||
)
|
||||
metrics.register_callback(
|
||||
"pending_edus",
|
||||
lambda: sum(map(len, edus.values())),
|
||||
lambda: (
|
||||
sum(map(len, edus.values()))
|
||||
+ sum(map(len, presence.values()))
|
||||
+ sum(map(len, edus_keyed.values()))
|
||||
),
|
||||
)
|
||||
|
||||
# destination -> list of tuple(failure, deferred)
|
||||
self.pending_failures_by_dest = {}
|
||||
|
||||
# destination -> stream_id of last successfully sent to-device message.
|
||||
# NB: may be a long or an int.
|
||||
self.last_device_stream_id_by_dest = {}
|
||||
|
||||
# destination -> stream_id of last successfully sent device list
|
||||
# update.
|
||||
self.last_device_list_stream_id_by_dest = {}
|
||||
|
||||
# HACK to get unique tx id
|
||||
self._next_txn_id = int(self._clock.time_msec())
|
||||
self._next_txn_id = int(self.clock.time_msec())
|
||||
|
||||
self._order = 1
|
||||
|
||||
self._is_processing = False
|
||||
self._last_poked_id = -1
|
||||
|
||||
def can_send_to(self, destination):
|
||||
"""Can we send messages to the given server?
|
||||
@@ -103,11 +133,76 @@ class TransactionQueue(object):
|
||||
else:
|
||||
return not destination.startswith("localhost")
|
||||
|
||||
def enqueue_pdu(self, pdu, destinations, order):
|
||||
@defer.inlineCallbacks
|
||||
def notify_new_events(self, current_id):
|
||||
"""This gets called when we have some new events we might want to
|
||||
send out to other servers.
|
||||
"""
|
||||
self._last_poked_id = max(current_id, self._last_poked_id)
|
||||
|
||||
if self._is_processing:
|
||||
return
|
||||
|
||||
try:
|
||||
self._is_processing = True
|
||||
while True:
|
||||
last_token = yield self.store.get_federation_out_pos("events")
|
||||
next_token, events = yield self.store.get_all_new_events_stream(
|
||||
last_token, self._last_poked_id, limit=20,
|
||||
)
|
||||
|
||||
logger.debug("Handling %s -> %s", last_token, next_token)
|
||||
|
||||
if not events and next_token >= self._last_poked_id:
|
||||
break
|
||||
|
||||
for event in events:
|
||||
# Only send events for this server.
|
||||
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
|
||||
is_mine = self.is_mine_id(event.event_id)
|
||||
if not is_mine and send_on_behalf_of is None:
|
||||
continue
|
||||
|
||||
# Get the state from before the event.
|
||||
# We need to make sure that this is the state from before
|
||||
# the event and not from after it.
|
||||
# Otherwise if the last member on a server in a room is
|
||||
# banned then it won't receive the event because it won't
|
||||
# be in the room after the ban.
|
||||
users_in_room = yield self.state.get_current_user_in_room(
|
||||
event.room_id, latest_event_ids=[
|
||||
prev_id for prev_id, _ in event.prev_events
|
||||
],
|
||||
)
|
||||
|
||||
destinations = set(
|
||||
get_domain_from_id(user_id) for user_id in users_in_room
|
||||
)
|
||||
if send_on_behalf_of is not None:
|
||||
# If we are sending the event on behalf of another server
|
||||
# then it already has the event and there is no reason to
|
||||
# send the event to it.
|
||||
destinations.discard(send_on_behalf_of)
|
||||
|
||||
logger.debug("Sending %s to %r", event, destinations)
|
||||
|
||||
self._send_pdu(event, destinations)
|
||||
|
||||
yield self.store.update_federation_out_pos(
|
||||
"events", next_token
|
||||
)
|
||||
|
||||
finally:
|
||||
self._is_processing = False
|
||||
|
||||
def _send_pdu(self, pdu, destinations):
|
||||
# We loop through all destinations to see whether we already have
|
||||
# a transaction in progress. If we do, stick it in the pending_pdus
|
||||
# table and we'll get back to it later.
|
||||
|
||||
order = self._order
|
||||
self._order += 1
|
||||
|
||||
destinations = set(destinations)
|
||||
destinations = set(
|
||||
dest for dest in destinations if self.can_send_to(dest)
|
||||
@@ -118,86 +213,83 @@ class TransactionQueue(object):
|
||||
if not destinations:
|
||||
return
|
||||
|
||||
deferreds = []
|
||||
sent_pdus_destination_dist.inc_by(len(destinations))
|
||||
|
||||
for destination in destinations:
|
||||
deferred = defer.Deferred()
|
||||
self.pending_pdus_by_dest.setdefault(destination, []).append(
|
||||
(pdu, deferred, order)
|
||||
(pdu, order)
|
||||
)
|
||||
|
||||
def chain(failure):
|
||||
if not deferred.called:
|
||||
deferred.errback(failure)
|
||||
preserve_context_over_fn(
|
||||
self._attempt_new_transaction, destination
|
||||
)
|
||||
|
||||
def log_failure(f):
|
||||
logger.warn("Failed to send pdu to %s: %s", destination, f.value)
|
||||
def send_presence(self, destination, states):
|
||||
if not self.can_send_to(destination):
|
||||
return
|
||||
|
||||
deferred.addErrback(log_failure)
|
||||
self.pending_presence_by_dest.setdefault(destination, {}).update({
|
||||
state.user_id: state for state in states
|
||||
})
|
||||
|
||||
with PreserveLoggingContext():
|
||||
self._attempt_new_transaction(destination).addErrback(chain)
|
||||
preserve_context_over_fn(
|
||||
self._attempt_new_transaction, destination
|
||||
)
|
||||
|
||||
deferreds.append(deferred)
|
||||
|
||||
# NO inlineCallbacks
|
||||
def enqueue_edu(self, edu):
|
||||
destination = edu.destination
|
||||
def send_edu(self, destination, edu_type, content, key=None):
|
||||
edu = Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type=edu_type,
|
||||
content=content,
|
||||
)
|
||||
|
||||
if not self.can_send_to(destination):
|
||||
return
|
||||
|
||||
deferred = defer.Deferred()
|
||||
self.pending_edus_by_dest.setdefault(destination, []).append(
|
||||
(edu, deferred)
|
||||
sent_edus_counter.inc()
|
||||
|
||||
if key:
|
||||
self.pending_edus_keyed_by_dest.setdefault(
|
||||
destination, {}
|
||||
)[(edu.edu_type, key)] = edu
|
||||
else:
|
||||
self.pending_edus_by_dest.setdefault(destination, []).append(edu)
|
||||
|
||||
preserve_context_over_fn(
|
||||
self._attempt_new_transaction, destination
|
||||
)
|
||||
|
||||
def chain(failure):
|
||||
if not deferred.called:
|
||||
deferred.errback(failure)
|
||||
|
||||
def log_failure(f):
|
||||
logger.warn("Failed to send edu to %s: %s", destination, f.value)
|
||||
|
||||
deferred.addErrback(log_failure)
|
||||
|
||||
with PreserveLoggingContext():
|
||||
self._attempt_new_transaction(destination).addErrback(chain)
|
||||
|
||||
return deferred
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def enqueue_failure(self, failure, destination):
|
||||
def send_failure(self, failure, destination):
|
||||
if destination == self.server_name or destination == "localhost":
|
||||
return
|
||||
|
||||
deferred = defer.Deferred()
|
||||
|
||||
if not self.can_send_to(destination):
|
||||
return
|
||||
|
||||
self.pending_failures_by_dest.setdefault(
|
||||
destination, []
|
||||
).append(
|
||||
(failure, deferred)
|
||||
).append(failure)
|
||||
|
||||
preserve_context_over_fn(
|
||||
self._attempt_new_transaction, destination
|
||||
)
|
||||
|
||||
def chain(f):
|
||||
if not deferred.called:
|
||||
deferred.errback(f)
|
||||
def send_device_messages(self, destination):
|
||||
if destination == self.server_name or destination == "localhost":
|
||||
return
|
||||
|
||||
def log_failure(f):
|
||||
logger.warn("Failed to send failure to %s: %s", destination, f.value)
|
||||
if not self.can_send_to(destination):
|
||||
return
|
||||
|
||||
deferred.addErrback(log_failure)
|
||||
preserve_context_over_fn(
|
||||
self._attempt_new_transaction, destination
|
||||
)
|
||||
|
||||
with PreserveLoggingContext():
|
||||
self._attempt_new_transaction(destination).addErrback(chain)
|
||||
|
||||
yield deferred
|
||||
def get_current_token(self):
|
||||
return 0
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def _attempt_new_transaction(self, destination):
|
||||
# list of (pending_pdu, deferred, order)
|
||||
if destination in self.pending_transactions:
|
||||
@@ -211,171 +303,257 @@ class TransactionQueue(object):
|
||||
)
|
||||
return
|
||||
|
||||
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
|
||||
pending_edus = self.pending_edus_by_dest.pop(destination, [])
|
||||
pending_failures = self.pending_failures_by_dest.pop(destination, [])
|
||||
|
||||
if pending_pdus:
|
||||
logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
|
||||
destination, len(pending_pdus))
|
||||
|
||||
if not pending_pdus and not pending_edus and not pending_failures:
|
||||
logger.debug("TX [%s] Nothing to send", destination)
|
||||
return
|
||||
|
||||
pending_pdus = []
|
||||
try:
|
||||
self.pending_transactions[destination] = 1
|
||||
|
||||
logger.debug("TX [%s] _attempt_new_transaction", destination)
|
||||
# This will throw if we wouldn't retry. We do this here so we fail
|
||||
# quickly, but we will later check this again in the http client,
|
||||
# hence why we throw the result away.
|
||||
yield get_retry_limiter(destination, self.clock, self.store)
|
||||
|
||||
# Sort based on the order field
|
||||
pending_pdus.sort(key=lambda t: t[2])
|
||||
# XXX: what's this for?
|
||||
yield run_on_reactor()
|
||||
|
||||
pdus = [x[0] for x in pending_pdus]
|
||||
edus = [x[0] for x in pending_edus]
|
||||
failures = [x[0].get_dict() for x in pending_failures]
|
||||
deferreds = [
|
||||
x[1]
|
||||
for x in pending_pdus + pending_edus + pending_failures
|
||||
]
|
||||
|
||||
txn_id = str(self._next_txn_id)
|
||||
|
||||
limiter = yield get_retry_limiter(
|
||||
destination,
|
||||
self._clock,
|
||||
self.store,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"TX [%s] {%s} Attempting new transaction"
|
||||
" (pdus: %d, edus: %d, failures: %d)",
|
||||
destination, txn_id,
|
||||
len(pending_pdus),
|
||||
len(pending_edus),
|
||||
len(pending_failures)
|
||||
)
|
||||
|
||||
logger.debug("TX [%s] Persisting transaction...", destination)
|
||||
|
||||
transaction = Transaction.create_new(
|
||||
origin_server_ts=int(self._clock.time_msec()),
|
||||
transaction_id=txn_id,
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
pdus=pdus,
|
||||
edus=edus,
|
||||
pdu_failures=failures,
|
||||
)
|
||||
|
||||
self._next_txn_id += 1
|
||||
|
||||
yield self.transaction_actions.prepare_to_send(transaction)
|
||||
|
||||
logger.debug("TX [%s] Persisted transaction", destination)
|
||||
logger.info(
|
||||
"TX [%s] {%s} Sending transaction [%s],"
|
||||
" (PDUs: %d, EDUs: %d, failures: %d)",
|
||||
destination, txn_id,
|
||||
transaction.transaction_id,
|
||||
len(pending_pdus),
|
||||
len(pending_edus),
|
||||
len(pending_failures),
|
||||
)
|
||||
|
||||
with limiter:
|
||||
# Actually send the transaction
|
||||
|
||||
# FIXME (erikj): This is a bit of a hack to make the Pdu age
|
||||
# keys work
|
||||
def json_data_cb():
|
||||
data = transaction.get_dict()
|
||||
now = int(self._clock.time_msec())
|
||||
if "pdus" in data:
|
||||
for p in data["pdus"]:
|
||||
if "age_ts" in p:
|
||||
unsigned = p.setdefault("unsigned", {})
|
||||
unsigned["age"] = now - int(p["age_ts"])
|
||||
del p["age_ts"]
|
||||
return data
|
||||
|
||||
try:
|
||||
response = yield self.transport_layer.send_transaction(
|
||||
transaction, json_data_cb
|
||||
)
|
||||
code = 200
|
||||
|
||||
if response:
|
||||
for e_id, r in response.get("pdus", {}).items():
|
||||
if "error" in r:
|
||||
logger.warn(
|
||||
"Transaction returned error for %s: %s",
|
||||
e_id, r,
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
code = e.code
|
||||
response = e.response
|
||||
|
||||
logger.info(
|
||||
"TX [%s] {%s} got %d response",
|
||||
destination, txn_id, code
|
||||
pending_pdus = []
|
||||
while True:
|
||||
device_message_edus, device_stream_id, dev_list_id = (
|
||||
yield self._get_new_device_messages(destination)
|
||||
)
|
||||
|
||||
logger.debug("TX [%s] Sent transaction", destination)
|
||||
logger.debug("TX [%s] Marking as delivered...", destination)
|
||||
# BEGIN CRITICAL SECTION
|
||||
#
|
||||
# In order to avoid a race condition, we need to make sure that
|
||||
# the following code (from popping the queues up to the point
|
||||
# where we decide if we actually have any pending messages) is
|
||||
# atomic - otherwise new PDUs or EDUs might arrive in the
|
||||
# meantime, but not get sent because we hold the
|
||||
# pending_transactions flag.
|
||||
|
||||
yield self.transaction_actions.delivered(
|
||||
transaction, code, response
|
||||
)
|
||||
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
|
||||
pending_edus = self.pending_edus_by_dest.pop(destination, [])
|
||||
pending_presence = self.pending_presence_by_dest.pop(destination, {})
|
||||
pending_failures = self.pending_failures_by_dest.pop(destination, [])
|
||||
|
||||
logger.debug("TX [%s] Marked as delivered", destination)
|
||||
pending_edus.extend(
|
||||
self.pending_edus_keyed_by_dest.pop(destination, {}).values()
|
||||
)
|
||||
|
||||
logger.debug("TX [%s] Yielding to callbacks...", destination)
|
||||
pending_edus.extend(device_message_edus)
|
||||
if pending_presence:
|
||||
pending_edus.append(
|
||||
Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type="m.presence",
|
||||
content={
|
||||
"push": [
|
||||
format_user_presence_state(
|
||||
presence, self.clock.time_msec()
|
||||
)
|
||||
for presence in pending_presence.values()
|
||||
]
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
for deferred in deferreds:
|
||||
if code == 200:
|
||||
deferred.callback(None)
|
||||
if pending_pdus:
|
||||
logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
|
||||
destination, len(pending_pdus))
|
||||
|
||||
if not pending_pdus and not pending_edus and not pending_failures:
|
||||
logger.debug("TX [%s] Nothing to send", destination)
|
||||
self.last_device_stream_id_by_dest[destination] = (
|
||||
device_stream_id
|
||||
)
|
||||
return
|
||||
|
||||
# END CRITICAL SECTION
|
||||
|
||||
success = yield self._send_new_transaction(
|
||||
destination, pending_pdus, pending_edus, pending_failures,
|
||||
)
|
||||
if success:
|
||||
# Remove the acknowledged device messages from the database
|
||||
# Only bother if we actually sent some device messages
|
||||
if device_message_edus:
|
||||
yield self.store.delete_device_msgs_for_remote(
|
||||
destination, device_stream_id
|
||||
)
|
||||
logger.info("Marking as sent %r %r", destination, dev_list_id)
|
||||
yield self.store.mark_as_sent_devices_by_remote(
|
||||
destination, dev_list_id
|
||||
)
|
||||
|
||||
self.last_device_stream_id_by_dest[destination] = device_stream_id
|
||||
self.last_device_list_stream_id_by_dest[destination] = dev_list_id
|
||||
else:
|
||||
deferred.errback(RuntimeError("Got status %d" % code))
|
||||
|
||||
# Ensures we don't continue until all callbacks on that
|
||||
# deferred have fired
|
||||
try:
|
||||
yield deferred
|
||||
except:
|
||||
pass
|
||||
|
||||
logger.debug("TX [%s] Yielded to callbacks", destination)
|
||||
except NotRetryingDestination:
|
||||
logger.info(
|
||||
"TX [%s] not ready for retry yet - "
|
||||
break
|
||||
except NotRetryingDestination as e:
|
||||
logger.debug(
|
||||
"TX [%s] not ready for retry yet (next retry at %s) - "
|
||||
"dropping transaction for now",
|
||||
destination,
|
||||
)
|
||||
except RuntimeError as e:
|
||||
# We capture this here as there is nothing actually listening
|
||||
# for this function's deferred.
|
||||
logger.warn(
|
||||
"TX [%s] Problem in _attempt_transaction: %s",
|
||||
destination,
|
||||
e,
|
||||
datetime.datetime.fromtimestamp(
|
||||
(e.retry_last_ts + e.retry_interval) / 1000.0
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
# We capture this here as there is nothing actually listening
|
||||
# for this function's deferred.
|
||||
logger.warn(
|
||||
"TX [%s] Problem in _attempt_transaction: %s",
|
||||
"TX [%s] Failed to send transaction: %s",
|
||||
destination,
|
||||
e,
|
||||
)
|
||||
|
||||
for deferred in deferreds:
|
||||
if not deferred.called:
|
||||
deferred.errback(e)
|
||||
|
||||
for p, _ in pending_pdus:
|
||||
logger.info("Failed to send event %s to %s", p.event_id,
|
||||
destination)
|
||||
finally:
|
||||
# We want to be *very* sure we delete this after we stop processing
|
||||
self.pending_transactions.pop(destination, None)
|
||||
|
||||
# Check to see if there is anything else to send.
|
||||
self._attempt_new_transaction(destination)
|
||||
@defer.inlineCallbacks
|
||||
def _get_new_device_messages(self, destination):
|
||||
last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
|
||||
to_device_stream_id = self.store.get_to_device_stream_token()
|
||||
contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
|
||||
destination, last_device_stream_id, to_device_stream_id
|
||||
)
|
||||
edus = [
|
||||
Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type="m.direct_to_device",
|
||||
content=content,
|
||||
)
|
||||
for content in contents
|
||||
]
|
||||
|
||||
last_device_list = self.last_device_list_stream_id_by_dest.get(destination, 0)
|
||||
now_stream_id, results = yield self.store.get_devices_by_remote(
|
||||
destination, last_device_list
|
||||
)
|
||||
edus.extend(
|
||||
Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type="m.device_list_update",
|
||||
content=content,
|
||||
)
|
||||
for content in results
|
||||
)
|
||||
defer.returnValue((edus, stream_id, now_stream_id))
|
||||
|
||||
@measure_func("_send_new_transaction")
|
||||
@defer.inlineCallbacks
|
||||
def _send_new_transaction(self, destination, pending_pdus, pending_edus,
|
||||
pending_failures):
|
||||
|
||||
# Sort based on the order field
|
||||
pending_pdus.sort(key=lambda t: t[1])
|
||||
pdus = [x[0] for x in pending_pdus]
|
||||
edus = pending_edus
|
||||
failures = [x.get_dict() for x in pending_failures]
|
||||
|
||||
success = True
|
||||
|
||||
logger.debug("TX [%s] _attempt_new_transaction", destination)
|
||||
|
||||
txn_id = str(self._next_txn_id)
|
||||
|
||||
logger.debug(
|
||||
"TX [%s] {%s} Attempting new transaction"
|
||||
" (pdus: %d, edus: %d, failures: %d)",
|
||||
destination, txn_id,
|
||||
len(pdus),
|
||||
len(edus),
|
||||
len(failures)
|
||||
)
|
||||
|
||||
logger.debug("TX [%s] Persisting transaction...", destination)
|
||||
|
||||
transaction = Transaction.create_new(
|
||||
origin_server_ts=int(self.clock.time_msec()),
|
||||
transaction_id=txn_id,
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
pdus=pdus,
|
||||
edus=edus,
|
||||
pdu_failures=failures,
|
||||
)
|
||||
|
||||
self._next_txn_id += 1
|
||||
|
||||
yield self.transaction_actions.prepare_to_send(transaction)
|
||||
|
||||
logger.debug("TX [%s] Persisted transaction", destination)
|
||||
logger.info(
|
||||
"TX [%s] {%s} Sending transaction [%s],"
|
||||
" (PDUs: %d, EDUs: %d, failures: %d)",
|
||||
destination, txn_id,
|
||||
transaction.transaction_id,
|
||||
len(pdus),
|
||||
len(edus),
|
||||
len(failures),
|
||||
)
|
||||
|
||||
# Actually send the transaction
|
||||
|
||||
# FIXME (erikj): This is a bit of a hack to make the Pdu age
|
||||
# keys work
|
||||
def json_data_cb():
|
||||
data = transaction.get_dict()
|
||||
now = int(self.clock.time_msec())
|
||||
if "pdus" in data:
|
||||
for p in data["pdus"]:
|
||||
if "age_ts" in p:
|
||||
unsigned = p.setdefault("unsigned", {})
|
||||
unsigned["age"] = now - int(p["age_ts"])
|
||||
del p["age_ts"]
|
||||
return data
|
||||
|
||||
try:
|
||||
response = yield self.transport_layer.send_transaction(
|
||||
transaction, json_data_cb
|
||||
)
|
||||
code = 200
|
||||
|
||||
if response:
|
||||
for e_id, r in response.get("pdus", {}).items():
|
||||
if "error" in r:
|
||||
logger.warn(
|
||||
"Transaction returned error for %s: %s",
|
||||
e_id, r,
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
code = e.code
|
||||
response = e.response
|
||||
|
||||
if e.code in (401, 404, 429) or 500 <= e.code:
|
||||
logger.info(
|
||||
"TX [%s] {%s} got %d response",
|
||||
destination, txn_id, code
|
||||
)
|
||||
raise e
|
||||
|
||||
logger.info(
|
||||
"TX [%s] {%s} got %d response",
|
||||
destination, txn_id, code
|
||||
)
|
||||
|
||||
logger.debug("TX [%s] Sent transaction", destination)
|
||||
logger.debug("TX [%s] Marking as delivered...", destination)
|
||||
|
||||
yield self.transaction_actions.delivered(
|
||||
transaction, code, response
|
||||
)
|
||||
|
||||
logger.debug("TX [%s] Marked as delivered", destination)
|
||||
|
||||
if code != 200:
|
||||
for p in pdus:
|
||||
logger.info(
|
||||
"Failed to send event %s to %s", p.event_id, destination
|
||||
)
|
||||
success = False
|
||||
|
||||
defer.returnValue(success)
|
||||
|
||||
@@ -54,6 +54,28 @@ class TransportLayerClient(object):
|
||||
destination, path=path, args={"event_id": event_id},
|
||||
)
|
||||
|
||||
@log_function
|
||||
def get_room_state_ids(self, destination, room_id, event_id):
|
||||
""" Requests all state for a given room from the given server at the
|
||||
given event. Returns the state's event IDs.
|
||||
|
||||
Args:
|
||||
destination (str): The host name of the remote home server we want
|
||||
to get the state from.
|
||||
room_id (str): The room ID we want the state of
|
||||
event_id (str): The event we want the context at.
|
||||
|
||||
Returns:
|
||||
Deferred: Results in a dict received from the remote homeserver.
|
||||
"""
|
||||
logger.debug("get_room_state_ids dest=%s, room=%s",
|
||||
destination, room_id)
|
||||
|
||||
path = PREFIX + "/state_ids/%s/" % room_id
|
||||
return self.client.get_json(
|
||||
destination, path=path, args={"event_id": event_id},
|
||||
)
|
||||
|
||||
@log_function
|
||||
def get_event(self, destination, event_id, timeout=None):
|
||||
""" Requests the pdu with give id and origin from the given server.
|
||||
@@ -141,6 +163,7 @@ class TransportLayerClient(object):
|
||||
data=json_data,
|
||||
json_data_callback=json_data_callback,
|
||||
long_retries=True,
|
||||
backoff_on_404=True, # If we get a 404 the other side has gone
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
@@ -152,7 +175,8 @@ class TransportLayerClient(object):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def make_query(self, destination, query_type, args, retry_on_dns_fail):
|
||||
def make_query(self, destination, query_type, args, retry_on_dns_fail,
|
||||
ignore_backoff=False):
|
||||
path = PREFIX + "/query/%s" % query_type
|
||||
|
||||
content = yield self.client.get_json(
|
||||
@@ -161,6 +185,7 @@ class TransportLayerClient(object):
|
||||
args=args,
|
||||
retry_on_dns_fail=retry_on_dns_fail,
|
||||
timeout=10000,
|
||||
ignore_backoff=ignore_backoff,
|
||||
)
|
||||
|
||||
defer.returnValue(content)
|
||||
@@ -179,7 +204,8 @@ class TransportLayerClient(object):
|
||||
content = yield self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
retry_on_dns_fail=True,
|
||||
retry_on_dns_fail=False,
|
||||
timeout=20000,
|
||||
)
|
||||
|
||||
defer.returnValue(content)
|
||||
@@ -219,6 +245,35 @@ class TransportLayerClient(object):
|
||||
destination=destination,
|
||||
path=path,
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
defer.returnValue(response)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def get_public_rooms(self, remote_server, limit, since_token,
|
||||
search_filter=None, include_all_networks=False,
|
||||
third_party_instance_id=None):
|
||||
path = PREFIX + "/publicRooms"
|
||||
|
||||
args = {
|
||||
"include_all_networks": "true" if include_all_networks else "false",
|
||||
}
|
||||
if third_party_instance_id:
|
||||
args["third_party_instance_id"] = third_party_instance_id,
|
||||
if limit:
|
||||
args["limit"] = [str(limit)]
|
||||
if since_token:
|
||||
args["since"] = [since_token]
|
||||
|
||||
# TODO(erikj): Actually send the search_filter across federation.
|
||||
|
||||
response = yield self.client.get_json(
|
||||
destination=remote_server,
|
||||
path=path,
|
||||
args=args,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
defer.returnValue(response)
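# Illustrative only: with limit=20 and since_token="t123" the args above would
# yield a request roughly like
#
#     GET /_matrix/federation/v1/publicRooms?include_all_networks=false&limit=20&since=t123
#
# (assuming the underlying client url-encodes list-valued query args).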
|
||||
@@ -263,7 +318,7 @@ class TransportLayerClient(object):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def query_client_keys(self, destination, query_content):
|
||||
def query_client_keys(self, destination, query_content, timeout):
|
||||
"""Query the device keys for a list of user ids hosted on a remote
|
||||
server.
|
||||
|
||||
@@ -292,12 +347,39 @@ class TransportLayerClient(object):
|
||||
destination=destination,
|
||||
path=path,
|
||||
data=query_content,
|
||||
timeout=timeout,
|
||||
)
|
||||
defer.returnValue(content)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def claim_client_keys(self, destination, query_content):
|
||||
def query_user_devices(self, destination, user_id, timeout):
|
||||
"""Query the devices for a user id hosted on a remote server.
|
||||
|
||||
Response:
|
||||
{
|
||||
"stream_id": "...",
|
||||
"devices": [ { ... } ]
|
||||
}
|
||||
|
||||
Args:
|
||||
destination(str): The server to query.
|
||||
user_id (str): The user ID whose devices to query.
|
||||
Returns:
|
||||
A dict containing the device keys.
|
||||
"""
|
||||
path = PREFIX + "/user/devices/" + user_id
|
||||
|
||||
content = yield self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
timeout=timeout,
|
||||
)
|
||||
defer.returnValue(content)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def claim_client_keys(self, destination, query_content, timeout):
|
||||
"""Claim one-time keys for a list of devices hosted on a remote server.
|
||||
|
||||
Request:
|
||||
@@ -328,13 +410,14 @@ class TransportLayerClient(object):
|
||||
destination=destination,
|
||||
path=path,
|
||||
data=query_content,
|
||||
timeout=timeout,
|
||||
)
|
||||
defer.returnValue(content)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def get_missing_events(self, destination, room_id, earliest_events,
|
||||
latest_events, limit, min_depth):
|
||||
latest_events, limit, min_depth, timeout):
|
||||
path = PREFIX + "/get_missing_events/%s" % (room_id,)
|
||||
|
||||
content = yield self.client.post_json(
|
||||
@@ -345,7 +428,8 @@ class TransportLayerClient(object):
|
||||
"min_depth": int(min_depth),
|
||||
"earliest_events": earliest_events,
|
||||
"latest_events": latest_events,
|
||||
}
|
||||
},
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
defer.returnValue(content)
|
||||
|
||||
@@ -18,13 +18,18 @@ from twisted.internet import defer
|
||||
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.http.servlet import parse_json_object_from_request
|
||||
from synapse.http.servlet import (
|
||||
parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
|
||||
parse_boolean_from_args,
|
||||
)
|
||||
from synapse.util.ratelimitutils import FederationRateLimiter
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from synapse.types import ThirdPartyInstanceID
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import simplejson as json
|
||||
import re
|
||||
import synapse
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -37,7 +42,7 @@ class TransportLayerServer(JsonResource):
|
||||
self.hs = hs
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
super(TransportLayerServer, self).__init__(hs)
|
||||
super(TransportLayerServer, self).__init__(hs, canonical_json=False)
|
||||
|
||||
self.authenticator = Authenticator(hs)
|
||||
self.ratelimiter = FederationRateLimiter(
|
||||
@@ -60,6 +65,16 @@ class TransportLayerServer(JsonResource):
|
||||
)
|
||||
|
||||
|
||||
class AuthenticationError(SynapseError):
|
||||
"""There was a problem authenticating the request"""
|
||||
pass
|
||||
|
||||
|
||||
class NoAuthenticationError(AuthenticationError):
|
||||
"""The request had no authentication information"""
|
||||
pass
|
||||
|
||||
|
||||
class Authenticator(object):
|
||||
def __init__(self, hs):
|
||||
self.keyring = hs.get_keyring()
|
||||
@@ -67,7 +82,7 @@ class Authenticator(object):
|
||||
|
||||
# A method just so we can pass 'self' as the authenticator to the Servlets
|
||||
@defer.inlineCallbacks
|
||||
def authenticate_request(self, request):
|
||||
def authenticate_request(self, request, content):
|
||||
json_request = {
|
||||
"method": request.method,
|
||||
"uri": request.uri,
|
||||
@@ -75,17 +90,10 @@ class Authenticator(object):
|
||||
"signatures": {},
|
||||
}
|
||||
|
||||
content = None
|
||||
origin = None
|
||||
if content is not None:
|
||||
json_request["content"] = content
|
||||
|
||||
if request.method in ["PUT", "POST"]:
|
||||
# TODO: Handle other method types? other content types?
|
||||
try:
|
||||
content_bytes = request.content.read()
|
||||
content = json.loads(content_bytes)
|
||||
json_request["content"] = content
|
||||
except:
|
||||
raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)
|
||||
origin = None
|
||||
|
||||
def parse_auth_header(header_str):
|
||||
try:
|
||||
@@ -103,14 +111,14 @@ class Authenticator(object):
|
||||
sig = strip_quotes(param_dict["sig"])
|
||||
return (origin, key, sig)
|
||||
except:
|
||||
raise SynapseError(
|
||||
raise AuthenticationError(
|
||||
400, "Malformed Authorization header", Codes.UNAUTHORIZED
|
||||
)
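# The federation Authorization header parsed above looks roughly like this
# (values are illustrative):
#
#     Authorization: X-Matrix origin=origin.example.org,key="ed25519:key1",sig="<base64 signature>"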
|
||||
|
||||
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
|
||||
|
||||
if not auth_headers:
|
||||
raise SynapseError(
|
||||
raise NoAuthenticationError(
|
||||
401, "Missing Authorization headers", Codes.UNAUTHORIZED,
|
||||
)
|
||||
|
||||
@@ -121,7 +129,7 @@ class Authenticator(object):
|
||||
json_request["signatures"].setdefault(origin, {})[key] = sig
|
||||
|
||||
if not json_request["signatures"]:
|
||||
raise SynapseError(
|
||||
raise NoAuthenticationError(
|
||||
401, "Missing Authorization headers", Codes.UNAUTHORIZED,
|
||||
)
|
||||
|
||||
@@ -130,38 +138,59 @@ class Authenticator(object):
|
||||
logger.info("Request from %s", origin)
|
||||
request.authenticated_entity = origin
|
||||
|
||||
defer.returnValue((origin, content))
|
||||
defer.returnValue(origin)
|
||||
|
||||
|
||||
class BaseFederationServlet(object):
|
||||
def __init__(self, handler, authenticator, ratelimiter, server_name):
|
||||
REQUIRE_AUTH = True
|
||||
|
||||
def __init__(self, handler, authenticator, ratelimiter, server_name,
|
||||
room_list_handler):
|
||||
self.handler = handler
|
||||
self.authenticator = authenticator
|
||||
self.ratelimiter = ratelimiter
|
||||
self.room_list_handler = room_list_handler
|
||||
|
||||
def _wrap(self, code):
|
||||
def _wrap(self, func):
|
||||
authenticator = self.authenticator
|
||||
ratelimiter = self.ratelimiter
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@functools.wraps(code)
|
||||
def new_code(request, *args, **kwargs):
|
||||
@functools.wraps(func)
|
||||
def new_func(request, *args, **kwargs):
|
||||
content = None
|
||||
if request.method in ["PUT", "POST"]:
|
||||
# TODO: Handle other method types? other content types?
|
||||
content = parse_json_object_from_request(request)
|
||||
|
||||
try:
|
||||
(origin, content) = yield authenticator.authenticate_request(request)
|
||||
with ratelimiter.ratelimit(origin) as d:
|
||||
yield d
|
||||
response = yield code(
|
||||
origin, content, request.args, *args, **kwargs
|
||||
)
|
||||
origin = yield authenticator.authenticate_request(request, content)
|
||||
except NoAuthenticationError:
|
||||
origin = None
|
||||
if self.REQUIRE_AUTH:
|
||||
logger.exception("authenticate_request failed")
|
||||
raise
|
||||
except:
|
||||
logger.exception("authenticate_request failed")
|
||||
raise
|
||||
|
||||
if origin:
|
||||
with ratelimiter.ratelimit(origin) as d:
|
||||
yield d
|
||||
response = yield func(
|
||||
origin, content, request.args, *args, **kwargs
|
||||
)
|
||||
else:
|
||||
response = yield func(
|
||||
origin, content, request.args, *args, **kwargs
|
||||
)
|
||||
|
||||
defer.returnValue(response)
|
||||
|
||||
# Extra logic that functools.wraps() doesn't finish
|
||||
new_code.__self__ = code.__self__
|
||||
new_func.__self__ = func.__self__
|
||||
|
||||
return new_code
|
||||
return new_func
|
||||
|
||||
def register(self, server):
|
||||
pattern = re.compile("^" + PREFIX + self.PATH + "$")
|
||||
@@ -269,6 +298,17 @@ class FederationStateServlet(BaseFederationServlet):
|
||||
)
|
||||
|
||||
|
||||
class FederationStateIdsServlet(BaseFederationServlet):
|
||||
PATH = "/state_ids/(?P<room_id>[^/]*)/"
|
||||
|
||||
def on_GET(self, origin, content, query, room_id):
|
||||
return self.handler.on_state_ids_request(
|
||||
origin,
|
||||
room_id,
|
||||
query.get("event_id", [None])[0],
|
||||
)
|
||||
|
||||
|
||||
class FederationBackfillServlet(BaseFederationServlet):
|
||||
PATH = "/backfill/(?P<context>[^/]*)/"
|
||||
|
||||
@@ -323,7 +363,7 @@ class FederationSendLeaveServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationEventAuthServlet(BaseFederationServlet):
|
||||
PATH = "/event_auth(?P<context>[^/]*)/(?P<event_id>[^/]*)"
|
||||
PATH = "/event_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
|
||||
|
||||
def on_GET(self, origin, content, query, context, event_id):
|
||||
return self.handler.on_event_auth(origin, context, event_id)
|
||||
@@ -365,10 +405,15 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
|
||||
class FederationClientKeysQueryServlet(BaseFederationServlet):
|
||||
PATH = "/user/keys/query"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query):
|
||||
response = yield self.handler.on_query_client_keys(origin, content)
|
||||
defer.returnValue((200, response))
|
||||
return self.handler.on_query_client_keys(origin, content)
|
||||
|
||||
|
||||
class FederationUserDevicesQueryServlet(BaseFederationServlet):
|
||||
PATH = "/user/devices/(?P<user_id>[^/]*)"
|
||||
|
||||
def on_GET(self, origin, content, query, user_id):
|
||||
return self.handler.on_query_user_devices(origin, user_id)
|
||||
|
||||
|
||||
class FederationClientKeysClaimServlet(BaseFederationServlet):
|
||||
@@ -386,7 +431,7 @@ class FederationQueryAuthServlet(BaseFederationServlet):
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, context, event_id):
|
||||
new_content = yield self.handler.on_query_auth_request(
|
||||
origin, content, event_id
|
||||
origin, content, context, event_id
|
||||
)
|
||||
|
||||
defer.returnValue((200, new_content))
|
||||
@@ -418,9 +463,10 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
|
||||
class On3pidBindServlet(BaseFederationServlet):
|
||||
PATH = "/3pid/onbind"
|
||||
|
||||
REQUIRE_AUTH = False
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request):
|
||||
content = parse_json_object_from_request(request)
|
||||
def on_POST(self, origin, content, query):
|
||||
if "invites" in content:
|
||||
last_exception = None
|
||||
for invite in content["invites"]:
|
||||
@@ -442,10 +488,118 @@ class On3pidBindServlet(BaseFederationServlet):
|
||||
raise last_exception
|
||||
defer.returnValue((200, {}))
|
||||
|
||||
# Avoid doing remote HS authorization checks which are done by default by
|
||||
# BaseFederationServlet.
|
||||
def _wrap(self, code):
|
||||
return code
|
||||
|
||||
class OpenIdUserInfo(BaseFederationServlet):
|
||||
"""
|
||||
Exchange a bearer token for information about a user.
|
||||
|
||||
The response format should be compatible with:
|
||||
http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
|
||||
|
||||
GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"sub": "@userpart:example.org",
|
||||
}
|
||||
"""
|
||||
|
||||
PATH = "/openid/userinfo"
|
||||
|
||||
REQUIRE_AUTH = False
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query):
|
||||
token = query.get("access_token", [None])[0]
|
||||
if token is None:
|
||||
defer.returnValue((401, {
|
||||
"errcode": "M_MISSING_TOKEN", "error": "Access Token required"
|
||||
}))
|
||||
return
|
||||
|
||||
user_id = yield self.handler.on_openid_userinfo(token)
|
||||
|
||||
if user_id is None:
|
||||
defer.returnValue((401, {
|
||||
"errcode": "M_UNKNOWN_TOKEN",
|
||||
"error": "Access Token unknown or expired"
|
||||
}))
|
||||
|
||||
defer.returnValue((200, {"sub": user_id}))
|
||||
|
||||
|
||||
class PublicRoomList(BaseFederationServlet):
|
||||
"""
|
||||
Fetch the public room list for this server.
|
||||
|
||||
This API returns information in the same format as /publicRooms on the
|
||||
client API, but will only ever include local public rooms and hence is
|
||||
intended for consumption by other home servers.
|
||||
|
||||
GET /publicRooms HTTP/1.1
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"chunk": [
|
||||
{
|
||||
"aliases": [
|
||||
"#test:localhost"
|
||||
],
|
||||
"guest_can_join": false,
|
||||
"name": "test room",
|
||||
"num_joined_members": 3,
|
||||
"room_id": "!whkydVegtvatLfXmPN:localhost",
|
||||
"world_readable": false
|
||||
}
|
||||
],
|
||||
"end": "END",
|
||||
"start": "START"
|
||||
}
|
||||
"""
|
||||
|
||||
PATH = "/publicRooms"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query):
|
||||
limit = parse_integer_from_args(query, "limit", 0)
|
||||
since_token = parse_string_from_args(query, "since", None)
|
||||
include_all_networks = parse_boolean_from_args(
|
||||
query, "include_all_networks", False
|
||||
)
|
||||
third_party_instance_id = parse_string_from_args(
|
||||
query, "third_party_instance_id", None
|
||||
)
|
||||
|
||||
if include_all_networks:
|
||||
network_tuple = None
|
||||
elif third_party_instance_id:
|
||||
network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
|
||||
else:
|
||||
network_tuple = ThirdPartyInstanceID(None, None)
|
||||
|
||||
data = yield self.room_list_handler.get_local_public_room_list(
|
||||
limit, since_token,
|
||||
network_tuple=network_tuple
|
||||
)
|
||||
defer.returnValue((200, data))
|
||||
|
||||
|
||||
class FederationVersionServlet(BaseFederationServlet):
|
||||
PATH = "/version"
|
||||
|
||||
REQUIRE_AUTH = False
|
||||
|
||||
def on_GET(self, origin, content, query):
|
||||
return defer.succeed((200, {
|
||||
"server": {
|
||||
"name": "Synapse",
|
||||
"version": get_version_string(synapse)
|
||||
},
|
||||
}))
|
||||
|
||||
|
||||
SERVLET_CLASSES = (
|
||||
@@ -453,6 +607,7 @@ SERVLET_CLASSES = (
|
||||
FederationPullServlet,
|
||||
FederationEventServlet,
|
||||
FederationStateServlet,
|
||||
FederationStateIdsServlet,
|
||||
FederationBackfillServlet,
|
||||
FederationQueryServlet,
|
||||
FederationMakeJoinServlet,
|
||||
@@ -465,9 +620,13 @@ SERVLET_CLASSES = (
|
||||
FederationGetMissingEventsServlet,
|
||||
FederationEventAuthServlet,
|
||||
FederationClientKeysQueryServlet,
|
||||
FederationUserDevicesQueryServlet,
|
||||
FederationClientKeysClaimServlet,
|
||||
FederationThirdPartyInviteExchangeServlet,
|
||||
On3pidBindServlet,
OpenIdUserInfo,
PublicRoomList,
FederationVersionServlet,
)

@@ -478,4 +637,5 @@ def register_servlets(hs, resource, authenticator, ratelimiter):
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
room_list_handler=hs.get_room_list_handler(),
).register(resource)

@@ -13,35 +13,37 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.appservice.scheduler import AppServiceScheduler
from synapse.appservice.api import ApplicationServiceApi
from .register import RegistrationHandler
from .room import (
RoomCreationHandler, RoomListHandler, RoomContextHandler,
RoomCreationHandler, RoomContextHandler,
)
from .room_member import RoomMemberHandler
from .message import MessageHandler
from .events import EventStreamHandler, EventHandler
from .federation import FederationHandler
from .profile import ProfileHandler
from .presence import PresenceHandler
from .directory import DirectoryHandler
from .typing import TypingNotificationHandler
from .admin import AdminHandler
from .appservice import ApplicationServicesHandler
from .sync import SyncHandler
from .auth import AuthHandler
from .identity import IdentityHandler
from .receipts import ReceiptsHandler
from .search import SearchHandler


class Handlers(object):

""" A collection of all the event handlers.
""" Deprecated. A collection of handlers.

There's no need to lazily create these; we'll just make them all eagerly
at construction time.
At some point most of the classes whose name ended "Handler" were
accessed through this class.

However this makes it painful to unit test the handlers and to run cut
down versions of synapse that only use specific handlers because using a
single handler required creating all of the handlers. So some of the
handlers have been lifted out of the Handlers object and are now accessed
directly through the homeserver object itself.

Any new handlers should follow the new pattern of being accessed through
the homeserver object and should not be added to the Handlers object.

The remaining handlers should be moved out of the handlers object.
"""

def __init__(self, hs):

@@ -49,26 +51,10 @@ class Handlers(object):
self.message_handler = MessageHandler(hs)
self.room_creation_handler = RoomCreationHandler(hs)
self.room_member_handler = RoomMemberHandler(hs)
self.event_stream_handler = EventStreamHandler(hs)
self.event_handler = EventHandler(hs)
self.federation_handler = FederationHandler(hs)
self.profile_handler = ProfileHandler(hs)
self.presence_handler = PresenceHandler(hs)
self.room_list_handler = RoomListHandler(hs)
self.directory_handler = DirectoryHandler(hs)
self.typing_notification_handler = TypingNotificationHandler(hs)
self.admin_handler = AdminHandler(hs)
self.receipts_handler = ReceiptsHandler(hs)
asapi = ApplicationServiceApi(hs)
self.appservice_handler = ApplicationServicesHandler(
hs, asapi, AppServiceScheduler(
clock=hs.get_clock(),
store=hs.get_datastore(),
as_api=asapi
)
)
self.sync_handler = SyncHandler(hs)
self.auth_handler = AuthHandler(hs)
self.identity_handler = IdentityHandler(hs)
self.search_handler = SearchHandler(hs)
self.room_context_handler = RoomContextHandler(hs)
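# Illustrative sketch, not part of the diff: the access pattern described in the
# Handlers docstring above. Handlers lifted out of the deprecated Handlers
# collection are reached through dedicated getters on the HomeServer object
# (e.g. hs.get_room_list_handler(), which this diff introduces), so tests and
# cut-down processes only construct what they need. The compatibility shim below
# is an assumption for illustration only.

def get_room_list_handler_compat(hs):
    # Prefer the new per-handler getter on the homeserver object.
    if hasattr(hs, "get_room_list_handler"):
        return hs.get_room_list_handler()
    # Fall back to the deprecated Handlers collection.
    return hs.get_handlers().room_list_handler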
@@ -13,49 +13,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.errors import LimitExceededError, SynapseError, AuthError
from synapse.crypto.event_signing import add_hashes_and_signatures
import synapse.types
from synapse.api.constants import Membership, EventTypes
from synapse.types import UserID, RoomAlias, Requester
from synapse.push.action_generator import ActionGenerator

from synapse.util.logcontext import PreserveLoggingContext

import logging
from synapse.api.errors import LimitExceededError
from synapse.types import UserID


logger = logging.getLogger(__name__)


VISIBILITY_PRIORITY = (
"world_readable",
"shared",
"invited",
"joined",
)


MEMBERSHIP_PRIORITY = (
Membership.JOIN,
Membership.INVITE,
Membership.KNOCK,
Membership.LEAVE,
Membership.BAN,
)


class BaseHandler(object):
"""
Common base class for the event handlers.

Attributes:
store (synapse.storage.events.StateStore):
store (synapse.storage.DataStore):
state_handler (synapse.state.StateHandler):
"""

def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer):
"""
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.notifier = hs.get_notifier()

@@ -65,165 +49,26 @@ class BaseHandler(object):
self.clock = hs.get_clock()
self.hs = hs

self.signing_key = hs.config.signing_key[0]
self.server_name = hs.hostname

self.event_builder_factory = hs.get_event_builder_factory()
@defer.inlineCallbacks
def filter_events_for_clients(self, user_tuples, events, event_id_to_state):
""" Returns dict of user_id -> list of events that user is allowed to
see.

Args:
user_tuples (str, bool): (user id, is_peeking) for each user to be
checked. is_peeking should be true if:
* the user is not currently a member of the room, and:
* the user has not been a member of the room since the
given events
events ([synapse.events.EventBase]): list of events to filter
"""
forgotten = yield defer.gatherResults([
self.store.who_forgot_in_room(
room_id,
)
for room_id in frozenset(e.room_id for e in events)
], consumeErrors=True)

# Set of membership event_ids that have been forgotten
event_id_forgotten = frozenset(
row["event_id"] for rows in forgotten for row in rows
)

def allowed(event, user_id, is_peeking):
"""
Args:
event (synapse.events.EventBase): event to check
user_id (str)
is_peeking (bool)
"""
state = event_id_to_state[event.event_id]

# get the room_visibility at the time of the event.
visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
if visibility_event:
visibility = visibility_event.content.get("history_visibility", "shared")
else:
visibility = "shared"

if visibility not in VISIBILITY_PRIORITY:
visibility = "shared"

# if it was world_readable, it's easy: everyone can read it
if visibility == "world_readable":
return True

# Always allow history visibility events on boundaries. This is done
# by setting the effective visibility to the least restrictive
# of the old vs new.
if event.type == EventTypes.RoomHistoryVisibility:
prev_content = event.unsigned.get("prev_content", {})
prev_visibility = prev_content.get("history_visibility", None)

if prev_visibility not in VISIBILITY_PRIORITY:
prev_visibility = "shared"

new_priority = VISIBILITY_PRIORITY.index(visibility)
old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
if old_priority < new_priority:
visibility = prev_visibility

# likewise, if the event is the user's own membership event, use
# the 'most joined' membership
membership = None
if event.type == EventTypes.Member and event.state_key == user_id:
membership = event.content.get("membership", None)
if membership not in MEMBERSHIP_PRIORITY:
membership = "leave"

prev_content = event.unsigned.get("prev_content", {})
prev_membership = prev_content.get("membership", None)
if prev_membership not in MEMBERSHIP_PRIORITY:
prev_membership = "leave"

new_priority = MEMBERSHIP_PRIORITY.index(membership)
old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
if old_priority < new_priority:
membership = prev_membership

# otherwise, get the user's membership at the time of the event.
if membership is None:
membership_event = state.get((EventTypes.Member, user_id), None)
if membership_event:
if membership_event.event_id not in event_id_forgotten:
membership = membership_event.membership

# if the user was a member of the room at the time of the event,
# they can see it.
if membership == Membership.JOIN:
return True

if visibility == "joined":
# we weren't a member at the time of the event, so we can't
# see this event.
return False

elif visibility == "invited":
# user can also see the event if they were *invited* at the time
# of the event.
return membership == Membership.INVITE

else:
# visibility is shared: user can also see the event if they have
# become a member since the event
#
# XXX: if the user has subsequently joined and then left again,
# ideally we would share history up to the point they left. But
# we don't know when they left.
return not is_peeking

defer.returnValue({
user_id: [
event
for event in events
if allowed(event, user_id, is_peeking)
]
for user_id, is_peeking in user_tuples
})
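# Illustrative sketch, not part of the diff: how the VISIBILITY_PRIORITY tuple
# defined above is used when a history-visibility event sits on a boundary. A
# lower index is less restrictive, and the handler keeps the least restrictive
# of the old and new values (the "old_priority < new_priority" comparison above).

VISIBILITY_PRIORITY = ("world_readable", "shared", "invited", "joined")


def least_restrictive(prev_visibility, visibility):
    # Unknown values fall back to "shared", mirroring the handler code above.
    if prev_visibility not in VISIBILITY_PRIORITY:
        prev_visibility = "shared"
    if visibility not in VISIBILITY_PRIORITY:
        visibility = "shared"
    if VISIBILITY_PRIORITY.index(prev_visibility) < VISIBILITY_PRIORITY.index(visibility):
        return prev_visibility
    return visibility


# e.g. an event changing the room from "shared" to "joined" is itself still
# readable under the more permissive "shared" rule:
assert least_restrictive("shared", "joined") == "shared"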
@defer.inlineCallbacks
def _filter_events_for_client(self, user_id, events, is_peeking=False):
"""
Check which events a user is allowed to see

Args:
user_id(str): user id to be checked
events([synapse.events.EventBase]): list of events to be checked
is_peeking(bool): should be True if:
* the user is not currently a member of the room, and:
* the user has not been a member of the room since the given
events

Returns:
[synapse.events.EventBase]
"""
types = (
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
)
event_id_to_state = yield self.store.get_state_for_events(
frozenset(e.event_id for e in events),
types=types
)
res = yield self.filter_events_for_clients(
[(user_id, is_peeking)], events, event_id_to_state
)
defer.returnValue(res.get(user_id, []))

def ratelimit(self, requester):
time_now = self.clock.time()
user_id = requester.user.to_string()

# The AS user itself is never rate limited.
app_service = self.store.get_app_service_by_user_id(user_id)
if app_service is not None:
return # do not ratelimit app service senders

# Disable rate limiting of users belonging to any AS that is configured
# not to be rate limited in its registration file (rate_limited: true|false).
if requester.app_service and not requester.app_service.is_rate_limited():
return

allowed, time_allowed = self.ratelimiter.send_message(
requester.user.to_string(), time_now,
user_id, time_now,
msg_rate_hz=self.hs.config.rc_messages_per_second,
burst_count=self.hs.config.rc_message_burst_count,
)

@@ -233,213 +78,24 @@ class BaseHandler(object):
)
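# Illustrative sketch, not part of the diff: the two exemptions applied in
# ratelimit() above before the send_message check. The AS sender user itself is
# never limited, and users belonging to an application service whose
# registration file sets rate_limited: false are exempt as well. `store` and
# `requester` stand in for the objects used above.

def is_exempt_from_ratelimit(store, requester):
    user_id = requester.user.to_string()
    if store.get_app_service_by_user_id(user_id) is not None:
        return True  # the application service's own sender user
    if requester.app_service and not requester.app_service.is_rate_limited():
        return True  # user of an AS registered with rate_limited: false
    return False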
@defer.inlineCallbacks
def _create_new_client_event(self, builder, prev_event_ids=None):
if prev_event_ids:
prev_events = yield self.store.add_event_hashes(prev_event_ids)
prev_max_depth = yield self.store.get_max_depth_of_events(prev_event_ids)
depth = prev_max_depth + 1
else:
latest_ret = yield self.store.get_latest_event_ids_and_hashes_in_room(
builder.room_id,
)

if latest_ret:
depth = max([d for _, _, d in latest_ret]) + 1
else:
depth = 1

prev_events = [
(event_id, prev_hashes)
for event_id, prev_hashes, _ in latest_ret
]

builder.prev_events = prev_events
builder.depth = depth

state_handler = self.state_handler

context = yield state_handler.compute_event_context(builder)

if builder.is_state():
builder.prev_state = yield self.store.add_event_hashes(
context.prev_state_events
)

yield self.auth.add_auth_events(builder, context)

add_hashes_and_signatures(
builder, self.server_name, self.signing_key
)

event = builder.build()

logger.debug(
"Created event %s with current state: %s",
event.event_id, context.current_state,
)

defer.returnValue(
(event, context,)
)

def is_host_in_room(self, current_state):
room_members = [
(state_key, event.membership)
for ((event_type, state_key), event) in current_state.items()
if event_type == EventTypes.Member
]
if len(room_members) == 0:
# Have we just created the room, and is this about to be the very
# first member event?
create_event = current_state.get(("m.room.create", ""))
if create_event:
return True
for (state_key, membership) in room_members:
if (
UserID.from_string(state_key).domain == self.hs.hostname
and membership == Membership.JOIN
):
return True
return False
@defer.inlineCallbacks
def handle_new_client_event(
self,
requester,
event,
context,
ratelimit=True,
extra_users=[]
):
# We now need to go and hit out to wherever we need to hit out to.

if ratelimit:
self.ratelimit(requester)

self.auth.check(event, auth_events=context.current_state)

yield self.maybe_kick_guest_users(event, context.current_state.values())

if event.type == EventTypes.CanonicalAlias:
# Check the alias is actually valid (at this time at least)
room_alias_str = event.content.get("alias", None)
if room_alias_str:
room_alias = RoomAlias.from_string(room_alias_str)
directory_handler = self.hs.get_handlers().directory_handler
mapping = yield directory_handler.get_association(room_alias)

if mapping["room_id"] != event.room_id:
raise SynapseError(
400,
"Room alias %s does not point to the room" % (
room_alias_str,
)
)

federation_handler = self.hs.get_handlers().federation_handler

if event.type == EventTypes.Member:
if event.content["membership"] == Membership.INVITE:
def is_inviter_member_event(e):
return (
e.type == EventTypes.Member and
e.sender == event.sender
)

event.unsigned["invite_room_state"] = [
{
"type": e.type,
"state_key": e.state_key,
"content": e.content,
"sender": e.sender,
}
for k, e in context.current_state.items()
if e.type in self.hs.config.room_invite_state_types
or is_inviter_member_event(e)
]

invitee = UserID.from_string(event.state_key)
if not self.hs.is_mine(invitee):
# TODO: Can we add signature from remote server in a nicer
# way? If we have been invited by a remote server, we need
# to get them to sign the event.

returned_invite = yield federation_handler.send_invite(
invitee.domain,
event,
)

event.unsigned.pop("room_state", None)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(
returned_invite.signatures
)

if event.type == EventTypes.Redaction:
if self.auth.check_redaction(event, auth_events=context.current_state):
original_event = yield self.store.get_event(
event.redacts,
check_redacted=False,
get_prev_content=False,
allow_rejected=False,
allow_none=False
)
if event.user_id != original_event.user_id:
raise AuthError(
403,
"You don't have permission to redact events"
)

if event.type == EventTypes.Create and context.current_state:
raise AuthError(
403,
"Changing the room create event is forbidden",
)

action_generator = ActionGenerator(self.hs)
yield action_generator.handle_push_actions_for_event(
event, context, self
)

(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
)

destinations = set()
for k, s in context.current_state.items():
try:
if k[0] == EventTypes.Member:
if s.content["membership"] == Membership.JOIN:
destinations.add(
UserID.from_string(s.state_key).domain
)
except SynapseError:
logger.warn(
"Failed to get destination from event %s", s.event_id
)

with PreserveLoggingContext():
# Don't block waiting on waking up all the listeners.
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)

# If invite, remove room_state from unsigned before sending.
event.unsigned.pop("invite_room_state", None)

federation_handler.handle_new_event(
event, destinations=destinations,
)

@defer.inlineCallbacks
def maybe_kick_guest_users(self, event, current_state):
def maybe_kick_guest_users(self, event, context=None):
# Technically this function invalidates current_state by changing it.
# Hopefully this isn't that important to the caller.
if event.type == EventTypes.GuestAccess:
guest_access = event.content.get("guest_access", "forbidden")
if guest_access != "can_join":
if context:
current_state = yield self.store.get_events(
context.current_state_ids.values()
)
else:
current_state = yield self.state_handler.get_current_state(
event.room_id
)

current_state = current_state.values()

logger.info("maybe_kick_guest_users %r", current_state)
yield self.kick_guest_users(current_state)

@defer.inlineCallbacks

@@ -472,7 +128,8 @@ class BaseHandler(object):
# and having homeservers have their own users leave keeps more
# of that decision-making and control local to the guest-having
# homeserver.
requester = Requester(target_user, "", True)
requester = synapse.types.create_requester(
target_user, is_guest=True)
handler = self.hs.get_handlers().room_member_handler
yield handler.update_membership(
requester,
@@ -19,7 +19,6 @@ from ._base import BaseHandler

import logging


logger = logging.getLogger(__name__)


@@ -54,3 +53,46 @@ class AdminHandler(BaseHandler):
}

defer.returnValue(ret)

@defer.inlineCallbacks
def get_users(self):
"""Function to retrieve a list of users in the users table.

Args:
Returns:
defer.Deferred: resolves to list[dict[str, Any]]
"""
ret = yield self.store.get_users()

defer.returnValue(ret)

@defer.inlineCallbacks
def get_users_paginate(self, order, start, limit):
"""Function to retrieve a paginated list of users from the
users table. This will return a json object, which contains a
list of users and the total number of users in the users table.

Args:
order (str): column name to order the results by
start (int): start number to begin the query from
limit (int): number of rows to retrieve
Returns:
defer.Deferred: resolves to json object {list[dict[str, Any]], count}
"""
ret = yield self.store.get_users_paginate(order, start, limit)

defer.returnValue(ret)

@defer.inlineCallbacks
def search_users(self, term):
"""Function to search users list for one or more users with
the matched term.

Args:
term (str): search term
Returns:
defer.Deferred: resolves to list[dict[str, Any]]
"""
ret = yield self.store.search_users(term)

defer.returnValue(ret)
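# Illustrative usage sketch, not part of the diff: calling the pagination helper
# documented above from other code. The column name and offsets are assumptions
# for illustration only; the docstring above only promises a result of the form
# {list[dict[str, Any]], count}.

from twisted.internet import defer


@defer.inlineCallbacks
def first_page_of_users(admin_handler):
    page = yield admin_handler.get_users_paginate(order="name", start=0, limit=10)
    defer.returnValue(page)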
@@ -16,8 +16,8 @@
from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.appservice import ApplicationService
from synapse.types import UserID
from synapse.util.metrics import Measure
from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred

import logging

@@ -35,47 +35,81 @@ def log_failure(failure):
)


# NB: Purposefully not inheriting BaseHandler since that contains way too much
# setup code which this handler does not need or use. This makes testing a lot
# easier.
class ApplicationServicesHandler(object):

def __init__(self, hs, appservice_api, appservice_scheduler):
def __init__(self, hs):
self.store = hs.get_datastore()
self.hs = hs
self.appservice_api = appservice_api
self.scheduler = appservice_scheduler
self.is_mine_id = hs.is_mine_id
self.appservice_api = hs.get_application_service_api()
self.scheduler = hs.get_application_service_scheduler()
self.started_scheduler = False
self.clock = hs.get_clock()
self.notify_appservices = hs.config.notify_appservices

self.current_max = 0
self.is_processing = False

@defer.inlineCallbacks
def notify_interested_services(self, event):
def notify_interested_services(self, current_id):
"""Notifies (pushes) all application services interested in this event.

Pushing is done asynchronously, so this method won't block for any
prolonged length of time.

Args:
event(Event): The event to push out to interested services.
current_id(int): The current maximum ID.
"""
# Gather interested services
services = yield self._get_services_for_event(event)
if len(services) == 0:
return # no services need notifying
services = self.store.get_app_services()
if not services or not self.notify_appservices:
return

# Do we know this user exists? If not, poke the user query API for
# all services which match that user regex. This needs to block as these
# user queries need to be made BEFORE pushing the event.
yield self._check_user_exists(event.sender)
if event.type == EventTypes.Member:
yield self._check_user_exists(event.state_key)
self.current_max = max(self.current_max, current_id)
if self.is_processing:
return

if not self.started_scheduler:
self.scheduler.start().addErrback(log_failure)
self.started_scheduler = True
with Measure(self.clock, "notify_interested_services"):
self.is_processing = True
try:
upper_bound = self.current_max
limit = 100
while True:
upper_bound, events = yield self.store.get_new_events_for_appservice(
upper_bound, limit
)

# Fork off pushes to these services
for service in services:
self.scheduler.submit_event_for_as(service, event)
if not events:
break

for event in events:
# Gather interested services
services = yield self._get_services_for_event(event)
if len(services) == 0:
continue # no services need notifying

# Do we know this user exists? If not, poke the user
# query API for all services which match that user regex.
# This needs to block as these user queries need to be
# made BEFORE pushing the event.
yield self._check_user_exists(event.sender)
if event.type == EventTypes.Member:
yield self._check_user_exists(event.state_key)

if not self.started_scheduler:
self.scheduler.start().addErrback(log_failure)
self.started_scheduler = True

# Fork off pushes to these services
for service in services:
preserve_fn(self.scheduler.submit_event_for_as)(
service, event
)

yield self.store.set_appservice_last_pos(upper_bound)

if len(events) < limit:
break
finally:
self.is_processing = False
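# Illustrative sketch, not part of the diff: the paging pattern used by
# notify_interested_services() above, written synchronously for clarity. Events
# are read in pages of `limit`, the stored appservice position is advanced after
# each page, and the loop stops on an empty or short page; the is_processing
# flag above ensures only one such loop runs at a time. `store.get_new_events`
# and `store.set_last_pos` are stand-ins for the storage methods used above.

def drain_new_events(store, handle_events, upper_bound, limit=100):
    while True:
        upper_bound, events = store.get_new_events(upper_bound, limit)
        if not events:
            break
        handle_events(events)
        store.set_last_pos(upper_bound)
        if len(events) < limit:
            break
    return upper_bound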
@defer.inlineCallbacks
def query_user_exists(self, user_id):

@@ -108,11 +142,12 @@ class ApplicationServicesHandler(object):
association can be found.
"""
room_alias_str = room_alias.to_string()
alias_query_services = yield self._get_services_for_event(
event=None,
restrict_to=ApplicationService.NS_ALIASES,
alias_list=[room_alias_str]
)
services = self.store.get_app_services()
alias_query_services = [
s for s in services if (
s.is_interested_in_alias(room_alias_str)
)
]
for alias_service in alias_query_services:
is_known_alias = yield self.appservice_api.query_alias(
alias_service, room_alias_str

@@ -125,52 +160,97 @@ class ApplicationServicesHandler(object):
defer.returnValue(result)

@defer.inlineCallbacks
def _get_services_for_event(self, event, restrict_to="", alias_list=None):
def query_3pe(self, kind, protocol, fields):
services = yield self._get_services_for_3pn(protocol)

results = yield preserve_context_over_deferred(defer.DeferredList([
preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields)
for service in services
], consumeErrors=True))

ret = []
for (success, result) in results:
if success:
ret.extend(result)

defer.returnValue(ret)

@defer.inlineCallbacks
def get_3pe_protocols(self, only_protocol=None):
services = self.store.get_app_services()
protocols = {}

# Collect up all the individual protocol responses out of the ASes
for s in services:
for p in s.protocols:
if only_protocol is not None and p != only_protocol:
continue

if p not in protocols:
protocols[p] = []

info = yield self.appservice_api.get_3pe_protocol(s, p)

if info is not None:
protocols[p].append(info)

def _merge_instances(infos):
if not infos:
return {}

# Merge the 'instances' lists of multiple results, but just take
# the other fields from the first as they ought to be identical
# copy the result so as not to corrupt the cached one
combined = dict(infos[0])
combined["instances"] = list(combined["instances"])

for info in infos[1:]:
combined["instances"].extend(info["instances"])

return combined

for p in protocols.keys():
protocols[p] = _merge_instances(protocols[p])

defer.returnValue(protocols)
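# Illustrative sketch, not part of the diff: what _merge_instances() above does
# when two application services both answer for the same protocol. Only the
# "instances" lists are concatenated; the remaining fields are taken from the
# first response. The field names below are examples only.

info_a = {"user_fields": ["nickname"], "instances": [{"network_id": "example-a"}]}
info_b = {"user_fields": ["nickname"], "instances": [{"network_id": "example-b"}]}

combined = dict(info_a)  # copy so the cached response is not mutated
combined["instances"] = list(combined["instances"])
combined["instances"].extend(info_b["instances"])
# combined["instances"] == [{"network_id": "example-a"}, {"network_id": "example-b"}]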
@defer.inlineCallbacks
def _get_services_for_event(self, event):
"""Retrieve a list of application services interested in this event.

Args:
event(Event): The event to check. Can be None if alias_list is not.
restrict_to(str): The namespace to restrict regex tests to.
alias_list: A list of aliases to get services for. If None, this
list is obtained from the database.
Returns:
list<ApplicationService>: A list of services interested in this
event based on the service regex.
"""
member_list = None
if hasattr(event, "room_id"):
# We need to know the aliases associated with this event.room_id,
# if any.
if not alias_list:
alias_list = yield self.store.get_aliases_for_room(
event.room_id
)
# We need to know the members associated with this event.room_id,
# if any.
member_list = yield self.store.get_users_in_room(event.room_id)

services = yield self.store.get_app_services()
services = self.store.get_app_services()
interested_list = [
s for s in services if (
s.is_interested(event, restrict_to, alias_list, member_list)
yield s.is_interested(event, self.store)
)
]
defer.returnValue(interested_list)

@defer.inlineCallbacks
def _get_services_for_user(self, user_id):
services = yield self.store.get_app_services()
services = self.store.get_app_services()
interested_list = [
s for s in services if (
s.is_interested_in_user(user_id)
)
]
defer.returnValue(interested_list)
return defer.succeed(interested_list)

def _get_services_for_3pn(self, protocol):
services = self.store.get_app_services()
interested_list = [
s for s in services if s.is_interested_in_protocol(protocol)
]
return defer.succeed(interested_list)

@defer.inlineCallbacks
def _is_unknown_user(self, user_id):
user = UserID.from_string(user_id)
if not self.hs.is_mine(user):
if not self.is_mine_id(user_id):
# we don't know if they are unknown or not since it isn't one of our
# users. We can't poke ASes.
defer.returnValue(False)

@@ -182,7 +262,7 @@ class ApplicationServicesHandler(object):
return

# user not found; could be the AS though, so check.
services = yield self.store.get_app_services()
services = self.store.get_app_services()
service_list = [s for s in services if s.sender == user_id]
defer.returnValue(len(service_list) == 0)
Some files were not shown because too many files have changed in this diff.