Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-11 01:40:27 +00:00)

Compare commits: erikj/file ... release-v0 (2661 commits)
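The figures above come from the hosting site's compare view between the two refs. The same comparison can be reproduced locally with plain git; this is a rough sketch that assumes both branch names (erikj/file and release-v0) still exist on the remote and that erikj/file is the base of the compare:

$ git clone https://github.com/element-hq/synapse.git
$ cd synapse
$ git rev-list --count origin/erikj/file..origin/release-v0   # number of commits in the compare
$ git log --oneline origin/erikj/file..origin/release-v0      # the individual commits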
.github/ISSUE_TEMPLATE.md (vendored) — new file, 47 lines
@@ -0,0 +1,47 @@
<!--

**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)


This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help the us to get all
the necessary data to fix your issue.

You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.

Text between <!-- and --> marks will be invisible in the report.

-->

### Description

Describe here the problem that you are experiencing, or the feature you are requesting.

### Steps to reproduce

- For bugs, list the steps
- that reproduce the bug
- using hyphens as bullet points

Describe how what happens differs from what you expected.

If you can identify any relevant log snippets from _homeserver.log_, please include
those here (please be careful to remove any personal or private data):

### Version information

<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->

- **Homeserver**: Was this issue identified on matrix.org or another homeserver?

If not matrix.org:
- **Version**: What version of Synapse is running? <!--
  You can find the Synapse version by inspecting the server headers (replace matrix.org with
  your own homeserver domain):
  $ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
  -->
- **Install method**: package manager/git clone/pip
- **Platform**: Tell us about the environment in which your homeserver is operating
  - distro, hardware, if it's running in a vm/container, etc.
.gitignore (vendored) — 8 lines changed
@@ -24,10 +24,10 @@ homeserver*.yaml
 .coverage
 htmlcov

-demo/*.db
-demo/*.log
-demo/*.log.*
-demo/*.pid
+demo/*/*.db
+demo/*/*.log
+demo/*/*.log.*
+demo/*/*.pid
 demo/media_store.*
 demo/etc

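The updated patterns add an extra directory level, suggesting the demo databases, logs and pidfiles now live in per-instance subdirectories rather than directly under demo/. A quick way to confirm which paths the new patterns match is git check-ignore; the paths below are hypothetical examples, assuming the demo scripts create one subdirectory per homeserver port:

$ git check-ignore -v demo/8080/homeserver.db demo/8080/homeserver.log demo/media_store.tmp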
.travis.yml — new file, 17 lines
@@ -0,0 +1,17 @@
sudo: false
language: python
python: 2.7

# tell travis to cache ~/.cache/pip
cache: pip

env:
- TOX_ENV=packaging
- TOX_ENV=pep8
- TOX_ENV=py27

install:
- pip install tox

script:
- tox -e $TOX_ENV
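The Travis configuration simply installs tox and runs one environment per matrix entry, so the same three checks can be reproduced locally. A minimal sketch, assuming a checkout of the repository (with its tox.ini) and a Python 2.7 interpreter available, as the CI config above requires:

$ pip install tox
$ tox -e packaging   # packaging checks
$ tox -e pep8        # style checks
$ tox -e py27        # test suite under Python 2.7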
CHANGES.rst — 1006 lines changed (diff suppressed because the file is too large)
MANIFEST.in (file header missing from the capture; name inferred from the include/exclude directives)
@@ -14,6 +14,7 @@ recursive-include docs *
 recursive-include res *
 recursive-include scripts *
 recursive-include scripts-dev *
+recursive-include synapse *.pyi
 recursive-include tests *.py

 recursive-include synapse/static *.css
@@ -23,5 +24,8 @@ recursive-include synapse/static *.js

 exclude jenkins.sh
 exclude jenkins*.sh
+exclude jenkins*
+recursive-exclude jenkins *.sh

+prune .github
 prune demo/etc
683  README.rst
@@ -11,8 +11,8 @@ VoIP. The basics you need to know to get up and running are:
 like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

 - Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
-you will normally refer to yourself and others using a 3PID: email
-address, phone number, etc rather than manipulating Matrix user IDs)
+you will normally refer to yourself and others using a third party identifier
+(3PID): email address, phone number, etc rather than manipulating Matrix user IDs)

 The overall architecture is::

@@ -20,12 +20,13 @@ The overall architecture is::
 https://somewhere.org/_matrix      https://elsewhere.net/_matrix

 ``#matrix:matrix.org`` is the official support room for Matrix, and can be
-accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
-bridge at irc://irc.freenode.net/matrix.
+accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
+via IRC bridge at irc://irc.freenode.net/matrix.

 Synapse is currently in rapid development, but as of version 0.5 we believe it
 is sufficiently stable to be run as an internet-facing service for real usage!

+
 About Matrix
 ============

@@ -52,10 +53,10 @@ generation of fully open and interoperable messaging and VoIP apps for the
 internet.

 Synapse is a reference "homeserver" implementation of Matrix from the core
-development team at matrix.org, written in Python/Twisted for clarity and
-simplicity. It is intended to showcase the concept of Matrix and let folks see
-the spec in the context of a codebase and let you run your own homeserver and
-generally help bootstrap the ecosystem.
+development team at matrix.org, written in Python/Twisted. It is intended to
+showcase the concept of Matrix and let folks see the spec in the context of a
+codebase and let you run your own homeserver and generally help bootstrap the
+ecosystem.

 In Matrix, every user runs one or more Matrix clients, which connect through to
 a Matrix homeserver. The homeserver stores all their personal chat history and
@@ -66,26 +67,16 @@ hosted by someone else (e.g. matrix.org) - there is no single point of control
 or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
 etc.

-Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
-web client demo implemented in AngularJS) and cmdclient (a basic Python
-command line utility which lets you easily see what the JSON APIs are up to).
-
-Meanwhile, iOS and Android SDKs and clients are available from:
-
-- https://github.com/matrix-org/matrix-ios-sdk
-- https://github.com/matrix-org/matrix-ios-kit
-- https://github.com/matrix-org/matrix-ios-console
-- https://github.com/matrix-org/matrix-android-sdk
-
 We'd like to invite you to join #matrix:matrix.org (via
-https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
-Matrix spec at https://matrix.org/docs/spec and API docs at
-https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
-report any bugs via https://matrix.org/jira.
+https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
+at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
+`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
+<http://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.

 Thanks for using Matrix!

-[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
+[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.

+
 Synapse Installation
 ====================
@@ -93,11 +84,17 @@ Synapse Installation
 Synapse is the reference python/twisted Matrix homeserver implementation.

 System requirements:

 - POSIX-compliant system (tested on Linux & OS X)
 - Python 2.7
-- At least 512 MB RAM.
+- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

-Synapse is written in python but some of the libraries is uses are written in
+Installing from source
+----------------------
+(Prebuilt packages are available for some platforms - see `Platform-Specific
+Instructions`_.)
+
+Synapse is written in python but some of the libraries it uses are written in
 C. So before we can install synapse itself we need a working C compiler and the
 header files for python C extensions.

@@ -112,10 +109,10 @@ Installing prerequisites on ArchLinux::
 sudo pacman -S base-devel python2 python-pip \
 python-setuptools python-virtualenv sqlite3

-Installing prerequisites on CentOS 7::
+Installing prerequisites on CentOS 7 or Fedora 25::

 sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-lcms2-devel libwebp-devel tcl-devel tk-devel \
+lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
 python-virtualenv libffi-devel openssl-devel
 sudo yum groupinstall "Development Tools"

@@ -124,6 +121,7 @@ Installing prerequisites on Mac OS X::
 xcode-select --install
 sudo easy_install pip
 sudo pip install virtualenv
+brew install pkg-config libffi

 Installing prerequisites on Raspbian::

@@ -134,10 +132,22 @@ Installing prerequisites on Raspbian::
 sudo pip install --upgrade ndg-httpsclient
 sudo pip install --upgrade virtualenv

+Installing prerequisites on openSUSE::
+
+sudo zypper in -t pattern devel_basis
+sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
+python-devel libffi-devel libopenssl-devel libjpeg62-devel
+
+Installing prerequisites on OpenBSD::
+
+doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
+libxslt
+
 To install the synapse homeserver run::

 virtualenv -p python2.7 ~/.synapse
 source ~/.synapse/bin/activate
+pip install --upgrade pip
 pip install --upgrade setuptools
 pip install https://github.com/matrix-org/synapse/tarball/master

@@ -145,7 +155,7 @@ This installs synapse, along with the libraries it uses, into a virtual
 environment under ``~/.synapse``. Feel free to pick a different directory
 if you prefer.

-In case of problems, please see the _Troubleshooting section below.
+In case of problems, please see the _`Troubleshooting` section below.

 Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
 above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
@@ -154,29 +164,65 @@ Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
 tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
 for details.

-To set up your homeserver, run (in your virtualenv, as before)::
+Configuring synapse
+-------------------
+
+Before you can start Synapse, you will need to generate a configuration
+file. To do this, run (in your virtualenv, as before)::

     cd ~/.synapse
     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name my.domain.name \
         --config-path homeserver.yaml \
         --generate-config \
         --report-stats=[yes|no]

-...substituting your host and domain name as appropriate.
+... substituting an appropriate value for ``--server-name``. The server name
+determines the "domain" part of user-ids for users on your server: these will
+all be of the format ``@user:my.domain.name``. It also determines how other
+matrix servers will reach yours for `Federation`_. For a test configuration,
+set this to the hostname of your server. For a more production-ready setup, you
+will probably want to specify your domain (``example.com``) rather than a
+matrix-specific hostname here (in the same way that your email address is
+probably ``user@example.com`` rather than ``user@email.example.com``) - but
+doing so may require more advanced setup - see `Setting up
+Federation`_. Beware that the server name cannot be changed later.

-This will generate you a config file that you can then customise, but it will
+This command will generate you a config file that you can then customise, but it will
 also generate a set of keys for you. These keys will allow your Home Server to
 identify itself to other Home Servers, so don't lose or delete them. It would be
-wise to back them up somewhere safe. If, for whatever reason, you do need to
+wise to back them up somewhere safe. (If, for whatever reason, you do need to
 change your Home Server's keys, you may find that other Home Servers have the
 old key cached. If you update the signing key, you should change the name of the
-key in the <server name>.signing.key file (the second word) to something different.
+key in the ``<server name>.signing.key`` file (the second word) to something
+different. See `the spec`__ for more information on key management.)

-By default, registration of new users is disabled. You can either enable
-registration in the config by specifying ``enable_registration: true``
-(it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
-you can use the command line to register new users::
+.. __: `key_management`_
+
+The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is
+configured without TLS; it should be behind a reverse proxy for TLS/SSL
+termination on port 443 which in turn should be used for clients. Port 8448
+is configured to use TLS with a self-signed certificate. If you would like
+to do initial test with a client without having to setup a reverse proxy,
+you can temporarly use another certificate. (Note that a self-signed
+certificate is fine for `Federation`_). You can do so by changing
+``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path``
+in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure
+to read `Using a reverse proxy with Synapse`_ when doing so.
+
+Apart from port 8448 using TLS, both ports are the same in the default
+configuration.
+
+Registering a user
+------------------
+
+You will need at least one user on your server in order to use a Matrix
+client. Users can be registered either `via a Matrix client`__, or via a
+commandline script.
+
+.. __: `client-user-reg`_
+
+To get started, it is easiest to use the command line to register new users::

     $ source ~/.synapse/bin/activate
     $ synctl start # if not already running
@@ -184,10 +230,41 @@ you can use the command line to register new users::
     New user localpart: erikj
     Password:
     Confirm password:
+    Make admin [no]:
     Success!

+This process uses a setting ``registration_shared_secret`` in
+``homeserver.yaml``, which is shared between Synapse itself and the
+``register_new_matrix_user`` script. It doesn't matter what it is (a random
+value is generated by ``--generate-config``), but it should be kept secret, as
+anyone with knowledge of it can register users on your server even if
+``enable_registration`` is ``false``.
+
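As an illustration of the settings discussed above, a minimal Python sketch (assuming PyYAML and the default ``~/.synapse/homeserver.yaml`` location) that reports whether they are set::

    # Illustrative sketch: inspect the generated config for the settings
    # discussed above. Assumes PyYAML and the default config location.
    import os
    import yaml

    CONFIG = os.path.expanduser("~/.synapse/homeserver.yaml")  # assumed path

    with open(CONFIG) as f:
        config = yaml.safe_load(f)

    print("server_name: %s" % config.get("server_name"))
    print("enable_registration: %s" % config.get("enable_registration", False))
    # Report only whether the shared secret exists - never print the value.
    print("registration_shared_secret set: %s"
          % bool(config.get("registration_shared_secret")))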
+Setting up a TURN server
+------------------------
+
 For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server. See docs/turn-howto.rst for details.
+a TURN server. See `<docs/turn-howto.rst>`_ for details.

+IPv6
+----
+
+As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
+for providing PR #1696.
+
+However, for federation to work on hosts with IPv6 DNS servers you **must**
+be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
+for details. We can't make Synapse depend on Twisted 17.1 by default
+yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
+so if you are using operating system dependencies you'll have to install your
+own Twisted 17.1 package via pip or backports etc.
+
+If you're running in a virtualenv then pip should have installed the newest
+Twisted automatically, but if your virtualenv is old you will need to manually
+upgrade to a newer Twisted dependency via:
+
+    pip install Twisted>=17.1.0
+
+
 Running Synapse
 ===============
@@ -199,29 +276,72 @@ run (e.g. ``~/.synapse``), and::
 source ./bin/activate
 synctl start

-Using PostgreSQL
-================
-
-As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
-alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
-traditionally used for convenience and simplicity.
-
-The advantages of Postgres include:
-
-* significant performance improvements due to the superior threading and
-caching model, smarter query optimiser
-* allowing the DB to be run on separate hardware
-* allowing basic active/backup high-availability with a "hot spare" synapse
-pointing at the same DB master, as well as enabling DB replication in
-synapse itself.
-
-The only disadvantage is that the code is relatively new as of April 2015 and
-may have a few regressions relative to SQLite.
-
-For information on how to install and use PostgreSQL, please see
-`docs/postgres.rst <docs/postgres.rst>`_.
+Connecting to Synapse from a client
+===================================
+
+The easiest way to try out your new Synapse installation is by connecting to it
+from a web client. The easiest option is probably the one at
+http://riot.im/app. You will need to specify a "Custom server" when you log on
+or register: set this to ``https://domain.tld`` if you setup a reverse proxy
+following the recommended setup, or ``https://localhost:8448`` - remember to specify the
+port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity
+server as the default - see `Identity servers`_.)
+
+If using port 8448 you will run into errors until you accept the self-signed
+certificate. You can easily do this by going to ``https://localhost:8448``
+directly with your browser and accept the presented certificate. You can then
+go back in your web client and proceed further.
+
+If all goes well you should at least be able to log in, create a room, and
+start sending messages.
+
+(The homeserver runs a web client by default at https://localhost:8448/, though
+as of the time of writing it is somewhat outdated and not really recommended -
+https://github.com/matrix-org/synapse/issues/1527).

-Platform Specific Instructions
+.. _`client-user-reg`:
+
+Registering a new user from a client
+------------------------------------
+
+By default, registration of new users via Matrix clients is disabled. To enable
+it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
+recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)
+
+Once ``enable_registration`` is set to ``true``, it is possible to register a
+user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
+
+Your new user name will be formed partly from the ``server_name`` (see
+`Configuring synapse`_), and partly from a localpart you specify when you
+create the account. Your name will take the form of::
+
+    @localpart:my.domain.name
+
+(pronounced "at localpart on my dot domain dot name").
+
+As when logging in, you will need to specify a "Custom server". Specify your
+desired ``localpart`` in the 'User name' box.
+
+
+Security Note
+=============
+
+Matrix serves raw user generated data in some APIs - specifically the `content
+repository endpoints <http://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+
+Whilst we have tried to mitigate against possible XSS attacks (e.g.
+https://github.com/matrix-org/synapse/pull/1021) we recommend running
+matrix homeservers on a dedicated domain name, to limit any malicious user generated
+content served to web browsers a matrix API from being able to attack webapps hosted
+on the same domain. This is particularly true of sharing a matrix webclient and
+server on the same domain.
+
+See https://github.com/vector-im/vector-web/issues/1977 and
+https://developer.github.com/changes/2014-04-25-user-content-security for more details.
+
+
+Platform-Specific Instructions
 ==============================

 Debian
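As an illustration of the self-signed certificate caveat above, a minimal Python sketch that queries the client API on port 8448 of a local test server with certificate verification disabled (only do this against a server you control)::

    # Illustrative sketch: query the client API on the self-signed TLS port
    # (8448) of a local test server. Certificate verification is disabled on
    # purpose, which is only acceptable against your own test installation.
    import json
    import ssl
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    context = ssl._create_unverified_context()
    resp = urlopen("https://localhost:8448/_matrix/client/versions",
                   context=context)
    print(json.loads(resp.read().decode("utf-8")))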
@@ -229,7 +349,7 @@ Debian

 Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
 Note that these packages do not include a client - choose one from
-https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)
+https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)

 Fedora
 ------
@@ -240,10 +360,12 @@ https://obs.infoserver.lv/project/monitor/matrix-synapse
 ArchLinux
 ---------

-The quickest way to get up and running with ArchLinux is probably with Ivan
-Shapovalov's AUR package from
-https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
-the necessary dependencies.
+The quickest way to get up and running with ArchLinux is probably with the community package
+https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
+the necessary dependencies. If the default web client is to be served (enabled by default in
+the generated config),
+https://www.archlinux.org/packages/community/any/python2-matrix-angular-sdk/ will also need to
+be installed.

 Alternatively, to install using pip a few changes may be needed as ArchLinux
 defaults to python 3, but synapse currently assumes python 2.7 by default:
@@ -280,9 +402,35 @@ FreeBSD

 Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

-- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
+- Ports: ``cd /usr/ports/net-im/py-matrix-synapse && make install clean``
 - Packages: ``pkg install py27-matrix-synapse``

+
+OpenBSD
+-------
+
+There is currently no port for OpenBSD. Additionally, OpenBSD's security
+settings require a slightly more difficult installation process.
+
+1) Create a new directory in ``/usr/local`` called ``_synapse``. Also, create a
+new user called ``_synapse`` and set that directory as the new user's home.
+This is required because, by default, OpenBSD only allows binaries which need
+write and execute permissions on the same memory space to be run from
+``/usr/local``.
+2) ``su`` to the new ``_synapse`` user and change to their home directory.
+3) Create a new virtualenv: ``virtualenv -p python2.7 ~/.synapse``
+4) Source the virtualenv configuration located at
+``/usr/local/_synapse/.synapse/bin/activate``. This is done in ``ksh`` by
+using the ``.`` command, rather than ``bash``'s ``source``.
+5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
+webpages for their titles.
+6) Use ``pip`` to install this repository: ``pip install
+https://github.com/matrix-org/synapse/tarball/master``
+7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
+chance of a compromised Synapse server being used to take over your box.
+
+After this, you may proceed with the rest of the install directions.
+
 NixOS
 -----

@@ -322,6 +470,7 @@ Troubleshooting:
 you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
 it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``

+
 Troubleshooting
 ===============

@@ -385,6 +534,30 @@ fix try re-installing from PyPI or directly from
 # Install from github
 pip install --user https://github.com/pyca/pynacl/tarball/master

+Running out of File Handles
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If synapse runs out of filehandles, it typically fails badly - live-locking
+at 100% CPU, and/or failing to accept new TCP connections (blocking the
+connecting client). Matrix currently can legitimately use a lot of file handles,
+thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
+servers. The first time a server talks in a room it will try to connect
+simultaneously to all participating servers, which could exhaust the available
+file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
+to respond. (We need to improve the routing algorithm used to be better than
+full mesh, but as of June 2017 this hasn't happened yet).
+
+If you hit this failure mode, we recommend increasing the maximum number of
+open file handles to be at least 4096 (assuming a default of 1024 or 256).
+This is typically done by editing ``/etc/security/limits.conf``
+
+Separately, Synapse may leak file handles if inbound HTTP requests get stuck
+during processing - e.g. blocked behind a lock or talking to a remote server etc.
+This is best diagnosed by matching up the 'Received request' and 'Processed request'
+log lines and looking for any 'Processed request' lines which take more than
+a few seconds to execute. Please let us know at #matrix-dev:matrix.org if
+you see this failure mode so we can help debug it, however.
+
 ArchLinux
 ~~~~~~~~~
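As an illustration of the limits discussed above, a minimal Python sketch (standard library, Unix only) that reports the file-descriptor limits of the current shell before you go editing ``limits.conf``::

    # Illustrative sketch: show the soft/hard limits on open file descriptors
    # for the current process (the same limits apply to a synapse started
    # from this shell).
    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print("soft limit: %d, hard limit: %d" % (soft, hard))
    if soft < 4096:
        print("consider raising the limit to at least 4096 (see limits.conf)")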
@@ -395,37 +568,6 @@ you will need to explicitly call Python2.7 - either running as::

 ...or by editing synctl with the correct python executable.

-Synapse Development
-===================
-
-To check out a synapse for development, clone the git repo into a working
-directory of your choice::
-
-    git clone https://github.com/matrix-org/synapse.git
-    cd synapse
-
-Synapse has a number of external dependencies, that are easiest
-to install using pip and a virtualenv::
-
-    virtualenv env
-    source env/bin/activate
-    python synapse/python_dependencies.py | xargs -n1 pip install
-    pip install setuptools_trial mock
-
-This will run a process of downloading and installing all the needed
-dependencies into a virtual env.
-
-Once this is done, you may wish to run Synapse's unit tests, to
-check that everything is installed as it should be::
-
-    python setup.py test
-
-This should end with a 'PASSED' result::
-
-    Ran 143 tests in 0.601s
-
-    PASSED (successes=143)
-
-
 Upgrading an existing Synapse
 =============================
@@ -436,140 +578,249 @@ versions of synapse.

 .. _UPGRADE.rst: UPGRADE.rst

+.. _federation:
+
 Setting up Federation
 =====================

-In order for other homeservers to send messages to your server, it will need to
-be publicly visible on the internet, and they will need to know its host name.
-You have two choices here, which will influence the form of your Matrix user
-IDs:
+Federation is the process by which users on different servers can participate
+in the same room. For this to work, those other servers must be able to contact
+yours to send messages.

-1) Use the machine's own hostname as available on public DNS in the form of
-its A or AAAA records. This is easier to set up initially, perhaps for
-testing, but lacks the flexibility of SRV.
+As explained in `Configuring synapse`_, the ``server_name`` in your
+``homeserver.yaml`` file determines the way that other servers will reach
+yours. By default, they will treat it as a hostname and try to connect to
+port 8448. This is easy to set up and will work with the default configuration,
+provided you set the ``server_name`` to match your machine's public DNS
+hostname.

-2) Set up a SRV record for your domain name. This requires you create a SRV
-record in DNS, but gives the flexibility to run the server on your own
-choice of TCP port, on a machine that might not be the same name as the
-domain name.
+For a more flexible configuration, you can set up a DNS SRV record. This allows
+you to run your server on a machine that might not have the same name as your
+domain name. For example, you might want to run your server at
+``synapse.example.com``, but have your Matrix user-ids look like
+``@user:example.com``. (A SRV record also allows you to change the port from
+the default 8448. However, if you are thinking of using a reverse-proxy on the
+federation port, which is not recommended, be sure to read
+`Reverse-proxying the federation port`_ first.)

-For the first form, simply pass the required hostname (of the machine) as the
---server-name parameter::
+To use a SRV record, first create your SRV record and publish it in DNS. This
+should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
+<synapse.server.name>``. The DNS record should then look something like::
+
+    $ dig -t srv _matrix._tcp.example.com
+    _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
+
+You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
+its user-ids, by setting ``server_name``::

     python -m synapse.app.homeserver \
-        --server-name machine.my.domain.name \
+        --server-name <yourdomain.com> \
         --config-path homeserver.yaml \
         --generate-config
     python -m synapse.app.homeserver --config-path homeserver.yaml

-Alternatively, you can run ``synctl start`` to guide you through the process.
-
-For the second form, first create your SRV record and publish it in DNS. This
-needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
-and port where the server is running. (At the current time synapse does not
-support clustering multiple servers into a single logical homeserver). The DNS
-record would then look something like::
-
-    $ dig -t srv _matrix._tcp.machine.my.domain.name
-    _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
-
-At this point, you should then run the homeserver with the hostname of this
-SRV record, as that is the name other machines will expect it to have::
-
-    python -m synapse.app.homeserver \
-        --server-name YOURDOMAIN \
-        --config-path homeserver.yaml \
-        --generate-config
-    python -m synapse.app.homeserver --config-path homeserver.yaml
-
-If you've already generated the config file, you need to edit the "server_name"
-in you ```homeserver.yaml``` file. If you've already started Synapse and a
+If you've already generated the config file, you need to edit the ``server_name``
+in your ``homeserver.yaml`` file. If you've already started Synapse and a
 database has been created, you will have to recreate the database.

-You may additionally want to pass one or more "-v" options, in order to
-increase the verbosity of logging output; at least for initial testing.
+If all goes well, you should be able to `connect to your server with a client`__,
+and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
+step. "Matrix HQ"'s sheer size and activity level tends to make even the
+largest boxes pause for thought.)
+
+.. __: `Connecting to Synapse from a client`_
+
+Troubleshooting
+---------------
+The typical failure mode with federation is that when you try to join a room,
+it is rejected with "401: Unauthorized". Generally this means that other
+servers in the room couldn't access yours. (Joining a room over federation is a
+complicated dance which requires connections in both directions).
+
+So, things to check are:
+
+* If you are trying to use a reverse-proxy, read `Reverse-proxying the
+federation port`_.
+* If you are not using a SRV record, check that your ``server_name`` (the part
+of your user-id after the ``:``) matches your hostname, and that port 8448 on
+that hostname is reachable from outside your network.
+* If you *are* using a SRV record, check that it matches your ``server_name``
+(it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
+it specifies are reachable from outside your network.
+
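The SRV checks in the list above can be scripted; as an illustration, a minimal Python sketch assuming the third-party ``dnspython`` package (``dig -t srv`` works just as well)::

    # Illustrative sketch: resolve the federation SRV record for a server_name
    # and print the host/port other homeservers would connect to.
    # Requires the dnspython package.
    import dns.resolver

    # dnspython 2.x renamed query() to resolve(); support both.
    _resolve = getattr(dns.resolver, "resolve", dns.resolver.query)

    def federation_targets(server_name):
        answers = _resolve("_matrix._tcp." + server_name, "SRV")
        return [(str(rr.target).rstrip("."), rr.port) for rr in answers]

    if __name__ == "__main__":
        # matrix.org publishes such a record; substitute your own server_name.
        for host, port in federation_targets("matrix.org"):
            print("%s:%d" % (host, port))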
 Running a Demo Federation of Synapses
 -------------------------------------

 If you want to get up and running quickly with a trio of homeservers in a
-private federation (``localhost:8080``, ``localhost:8081`` and
-``localhost:8082``) which you can then access through the webclient running at
-http://localhost:8080. Simply run::
-
-    demo/start.sh
-
-This is mainly useful just for development purposes.
-
-Running The Demo Web Client
-===========================
-
-The homeserver runs a web client by default at https://localhost:8448/.
-
-If this is the first time you have used the client from that browser (it uses
-HTML5 local storage to remember its config), you will need to log in to your
-account. If you don't yet have an account, because you've just started the
-homeserver for the first time, then you'll need to register one.
-
-Registering A New Account
--------------------------
-
-Your new user name will be formed partly from the hostname your server is
-running as, and partly from a localpart you specify when you create the
-account. Your name will take the form of::
-
-    @localpart:my.domain.here
-    (pronounced "at localpart on my dot domain dot here")
-
-Specify your desired localpart in the topmost box of the "Register for an
-account" form, and click the "Register" button. Hostnames can contain ports if
-required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
-internal synapse sandbox running on localhost).
-
-If registration fails, you may need to enable it in the homeserver (see
-`Synapse Installation`_ above)
-
-Logging In To An Existing Account
----------------------------------
-
-Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
-the form and click the Login button.
+private federation, there is a script in the ``demo`` directory. This is mainly
+useful just for development purposes. See `<demo/README>`_.
+
+Using PostgreSQL
+================
+
+As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
+alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
+traditionally used for convenience and simplicity.
+
+The advantages of Postgres include:
+
+* significant performance improvements due to the superior threading and
+caching model, smarter query optimiser
+* allowing the DB to be run on separate hardware
+* allowing basic active/backup high-availability with a "hot spare" synapse
+pointing at the same DB master, as well as enabling DB replication in
+synapse itself.
+
+For information on how to install and use PostgreSQL, please see
+`docs/postgres.rst <docs/postgres.rst>`_.
+
+.. _reverse-proxy:
+
+Using a reverse proxy with Synapse
+==================================
+
+It is recommended to put a reverse proxy such as
+`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
+`HAProxy <http://www.haproxy.org/>`_ in front of Synapse. One advantage of
+doing so is that it means that you can expose the default https port (443) to
+Matrix clients without needing to run Synapse with root privileges.
+
+The most important thing to know here is that Matrix clients and other Matrix
+servers do not necessarily need to connect to your server via the same
+port. Indeed, clients will use port 443 by default, whereas servers default to
+port 8448. Where these are different, we refer to the 'client port' and the
+'federation port'.
+
+The next most important thing to know is that using a reverse-proxy on the
+federation port has a number of pitfalls. It is possible, but be sure to read
+`Reverse-proxying the federation port`_.
+
+The recommended setup is therefore to configure your reverse-proxy on port 443
+to port 8008 of synapse for client connections, but to also directly expose port
+8448 for server-server connections. All the Matrix endpoints begin ``/_matrix``,
+so an example nginx configuration might look like::
+
+    server {
+        listen 443 ssl;
+        listen [::]:443 ssl;
+        server_name matrix.example.com;
+
+        location /_matrix {
+            proxy_pass http://localhost:8008;
+            proxy_set_header X-Forwarded-For $remote_addr;
+        }
+    }
+
+You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
+for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
+recorded correctly.
+
+Having done so, you can then use ``https://matrix.example.com`` (instead of
+``https://matrix.example.com:8448``) as the "Custom server" when `Connecting to
+Synapse from a client`_.
+
+Reverse-proxying the federation port
+------------------------------------
+
+There are two issues to consider before using a reverse-proxy on the federation
+port:
+
+* Due to the way SSL certificates are managed in the Matrix federation protocol
+(see `spec`__), Synapse needs to be configured with the path to the SSL
+certificate, *even if you do not terminate SSL at Synapse*.
+
+.. __: `key_management`_
+
+* Synapse does not currently support SNI on the federation protocol
+(`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_), which
+means that using name-based virtual hosting is unreliable.
+
+Furthermore, a number of the normal reasons for using a reverse-proxy do not
+apply:
+
+* Other servers will connect on port 8448 by default, so there is no need to
+listen on port 443 (for federation, at least), which avoids the need for root
+privileges and virtual hosting.
+
+* A self-signed SSL certificate is fine for federation, so there is no need to
+automate renewals. (The certificate generated by ``--generate-config`` is
+valid for 10 years.)
+
+If you want to set up a reverse-proxy on the federation port despite these
+caveats, you will need to do the following:
+
+* In ``homeserver.yaml``, set ``tls_certificate_path`` to the path to the SSL
+certificate file used by your reverse-proxy, and set ``no_tls`` to ``True``.
+(``tls_private_key_path`` will be ignored if ``no_tls`` is ``True``.)
+
+* In your reverse-proxy configuration:
+
+* If there are other virtual hosts on the same port, make sure that the
+*default* one uses the certificate configured above.
+
+* Forward ``/_matrix`` to Synapse.
+
+* If your reverse-proxy is not listening on port 8448, publish a SRV record to
+tell other servers how to find you. See `Setting up Federation`_.
+
+When updating the SSL certificate, just update the file pointed to by
+``tls_certificate_path``: there is no need to restart synapse. (You may like to
+use a symbolic link to help make this process atomic.)
+
+The most common mistake when setting up federation is not to tell Synapse about
+your SSL certificate. To check it, you can visit
+``https://matrix.org/federationtester/api/report?server_name=<your_server_name>``.
+Unfortunately, there is no UI for this yet, but, you should see
+``"MatchingTLSFingerprint": true``. If not, check that
+``Certificates[0].SHA256Fingerprint`` (the fingerprint of the certificate
+presented by your reverse-proxy) matches ``Keys.tls_fingerprints[0].sha256``
+(the fingerprint of the certificate Synapse is using).

 Identity Servers
 ================

-The job of authenticating 3PIDs and tracking which 3PIDs are associated with a
-given Matrix user is very security-sensitive, as there is obvious risk of spam
-if it is too easy to sign up for Matrix accounts or harvest 3PID data.
-Meanwhile the job of publishing the end-to-end encryption public keys for
-Matrix users is also very security-sensitive for similar reasons.
+Identity servers have the job of mapping email addresses and other 3rd Party
+IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
+before creating that mapping.

-Therefore the role of managing trusted identity in the Matrix ecosystem is
-farmed out to a cluster of known trusted ecosystem partners, who run 'Matrix
-Identity Servers' such as ``sydent``, whose role is purely to authenticate and
-track 3PID logins and publish end-user public keys.
+**They are not where accounts or credentials are stored - these live on home
+servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**

-It's currently early days for identity servers as Matrix is not yet using 3PIDs
-as the primary means of identity and E2E encryption is not complete. As such,
-we are running a single identity server (https://matrix.org) at the current
-time.
+This process is very security-sensitive, as there is obvious risk of spam if it
+is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
+term, we hope to create a decentralised system to manage it (`matrix-doc #712
+<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
+the role of managing trusted identity in the Matrix ecosystem is farmed out to
+a cluster of known trusted ecosystem partners, who run 'Matrix Identity
+Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
+is purely to authenticate and track 3PID logins and publish end-user public
+keys.
+
+You can host your own copy of Sydent, but this will prevent you reaching other
+users in the Matrix ecosystem via their email address, and prevent them finding
+you. We therefore recommend that you use one of the centralised identity servers
+at ``https://matrix.org`` or ``https://vector.im`` for now.
+
+To reiterate: the Identity server will only be used if you choose to associate
+an email address with your account, or send an invite to another user via their
+email address.
+

 URL Previews
 ============

-Synapse 0.15.0 introduces an experimental new API for previewing URLs at
-/_matrix/media/r0/preview_url. This is disabled by default. To turn it on
-you must enable the `url_preview_enabled: True` config parameter and explicitly
-specify the IP ranges that Synapse is not allowed to spider for previewing in
-the `url_preview_ip_range_blacklist` configuration parameter. This is critical
-from a security perspective to stop arbitrary Matrix users spidering 'internal'
-URLs on your network. At the very least we recommend that your loopback and
-RFC1918 IP addresses are blacklisted.
+Synapse 0.15.0 introduces a new API for previewing URLs at
+``/_matrix/media/r0/preview_url``. This is disabled by default. To turn it on
+you must enable the ``url_preview_enabled: True`` config parameter and
+explicitly specify the IP ranges that Synapse is not allowed to spider for
+previewing in the ``url_preview_ip_range_blacklist`` configuration parameter.
+This is critical from a security perspective to stop arbitrary Matrix users
+spidering 'internal' URLs on your network. At the very least we recommend that
+your loopback and RFC1918 IP addresses are blacklisted.

 This also requires the optional lxml and netaddr python dependencies to be
 installed.
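As an illustration of the IP-range blacklist described above, a minimal Python sketch using the optional ``netaddr`` dependency (the ranges shown are just the usual loopback/RFC1918 examples, not a complete recommendation)::

    # Illustrative sketch of what an IP-range blacklist check looks like with
    # netaddr, one of the optional dependencies named above.
    from netaddr import IPAddress, IPNetwork

    BLACKLIST = [IPNetwork(cidr) for cidr in (
        "127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16",
    )]

    def is_blacklisted(ip):
        addr = IPAddress(ip)
        return any(addr in net for net in BLACKLIST)

    print(is_blacklisted("192.168.1.10"))   # True - internal, must not be spidered
    print(is_blacklisted("93.184.216.34"))  # False - public address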
@@ -583,7 +834,7 @@ server, they can request a password-reset token via clients such as Vector.

 A manual password reset can be done via direct database access as follows.

-First calculate the hash of the new password:
+First calculate the hash of the new password::

     $ source ~/.synapse/bin/activate
     $ ./scripts/hash_password
@@ -591,16 +842,46 @@ First calculate the hash of the new password:
     Confirm password:
     $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

-Then update the `users` table in the database:
+Then update the `users` table in the database::

     UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
     WHERE name='@test:test.com';

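``scripts/hash_password`` is the canonical tool for the step above; purely as an illustration, the ``$2a$12$...`` value is a bcrypt hash, and a comparable one can be produced with the third-party ``bcrypt`` package (an assumption) like so::

    # Illustrative sketch only - prefer scripts/hash_password, which stays in
    # sync with how Synapse actually verifies passwords. Assumes bcrypt.
    import getpass
    import bcrypt

    password = getpass.getpass("New password: ")
    hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(12))
    print(hashed.decode("ascii"))   # paste this into the UPDATE statement above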
-Where's the spec?!
-==================
-
-The source of the matrix spec lives at https://github.com/matrix-org/matrix-doc.
-A recent HTML snapshot of this lives at http://matrix.org/docs/spec
+Synapse Development
+===================
+
+Before setting up a development environment for synapse, make sure you have the
+system dependencies (such as the python header files) installed - see
+`Installing from source`_.
+
+To check out a synapse for development, clone the git repo into a working
+directory of your choice::
+
+    git clone https://github.com/matrix-org/synapse.git
+    cd synapse
+
+Synapse has a number of external dependencies, that are easiest
+to install using pip and a virtualenv::
+
+    virtualenv -p python2.7 env
+    source env/bin/activate
+    python synapse/python_dependencies.py | xargs pip install
+    pip install lxml mock
+
+This will run a process of downloading and installing all the needed
+dependencies into a virtual env.
+
+Once this is done, you may wish to run Synapse's unit tests, to
+check that everything is installed as it should be::
+
+    PYTHONPATH="." trial tests
+
+This should end with a 'PASSED' result::
+
+    Ran 143 tests in 0.601s
+
+    PASSED (successes=143)
+

 Building Internal API Documentation
@@ -617,7 +898,6 @@ Building internal API documentation::
 python setup.py build_sphinx

-
 Help!! Synapse eats all my RAM!
 ===============================

@@ -626,10 +906,9 @@ cache a lot of recent room data and metadata in RAM in order to speed up
 common requests. We'll improve this in future, but for now the easiest
 way to either reduce the RAM usage (at the risk of slowing things down)
 is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
-variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out
-at around 3-4GB of resident memory - this is what we currently run the
-matrix.org on. The default setting is currently 0.1, which is probably
-around a ~700MB footprint. You can dial it down further to 0.02 if
-desired, which targets roughly ~512MB. Conversely you can dial it up if
-you need performance for lots of users and have a box with a lot of RAM.
+variable. The default is 0.5, which can be decreased to reduce RAM usage
+in memory constrained enviroments, or increased if performance starts to
+degrade.
+
+.. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
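``SYNAPSE_CACHE_FACTOR`` is read from the environment at startup, so it has to be set in whatever launches synapse; as an illustration, a minimal Python sketch (assuming ``synctl`` is on the PATH of the active virtualenv, and 0.1 is just an example value for a memory-constrained host)::

    # Illustrative sketch: start synapse via synctl with a reduced cache factor.
    import os
    import subprocess

    env = dict(os.environ, SYNAPSE_CACHE_FACTOR="0.1")
    subprocess.check_call(["synctl", "start"], env=env)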
26
UPGRADE.rst
26
UPGRADE.rst
@@ -5,20 +5,25 @@ Before upgrading check if any special steps are required to upgrade from the
|
|||||||
what you currently have installed to current version of synapse. The extra
|
what you currently have installed to current version of synapse. The extra
|
||||||
instructions that may be required are listed later in this document.
|
instructions that may be required are listed later in this document.
|
||||||
|
|
||||||
If synapse was installed in a virtualenv then active that virtualenv before
|
1. If synapse was installed in a virtualenv then active that virtualenv before
|
||||||
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:
|
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then
|
||||||
|
run:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
source ~/.synapse/bin/activate
|
source ~/.synapse/bin/activate
|
||||||
|
|
||||||
If synapse was installed using pip then upgrade to the latest version by
|
2. If synapse was installed using pip then upgrade to the latest version by
|
||||||
running:
|
running:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
|
pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
|
||||||
|
|
||||||
|
# restart synapse
|
||||||
|
synctl restart
|
||||||
|
|
||||||
|
|
||||||
If synapse was installed using git then upgrade to the latest version by
|
If synapse was installed using git then upgrade to the latest version by
|
||||||
running:
|
running:
|
||||||
|
|
||||||
@@ -27,8 +32,21 @@ running:
|
|||||||
# Pull the latest version of the master branch.
|
# Pull the latest version of the master branch.
|
||||||
git pull
|
git pull
|
||||||
# Update the versions of synapse's python dependencies.
|
# Update the versions of synapse's python dependencies.
|
||||||
python synapse/python_dependencies.py | xargs -n1 pip install
|
python synapse/python_dependencies.py | xargs pip install --upgrade
|
||||||
|
|
||||||
|
# restart synapse
|
||||||
|
./synctl restart
|
||||||
|
|
||||||
|
|
||||||
|
To check whether your update was sucessful, you can check the Server header
|
||||||
|
returned by the Client-Server API:
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
# replace <host.name> with the hostname of your synapse homeserver.
|
||||||
|
# You may need to specify a port (eg, :8448) if your server is not
|
||||||
|
# configured on port 443.
|
||||||
|
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
|
||||||
|
|
||||||
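If you prefer to script the check, a hedged Python equivalent of the curl command
above is sketched below. It assumes the third-party ``requests`` package is
installed; the host and port are placeholders for your homeserver.

.. code:: python

    import requests

    host = "localhost:8448"  # replace with your homeserver's host (and port)
    # verify=False mirrors curl's -k flag for self-signed certificates
    resp = requests.get("https://%s/_matrix/client/versions" % host, verify=False)
    print(resp.headers.get("Server"))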
Upgrading to v0.15.0
|
Upgrading to v0.15.0
|
||||||
====================
|
====================
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ import urlparse
|
|||||||
import nacl.signing
|
import nacl.signing
|
||||||
import nacl.encoding
|
import nacl.encoding
|
||||||
|
|
||||||
from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
|
from signedjson.sign import verify_signed_json, SignatureVerifyException
|
||||||
|
|
||||||
CONFIG_JSON = "cmdclient_config.json"
|
CONFIG_JSON = "cmdclient_config.json"
|
||||||
|
|
||||||
|
|||||||
@@ -36,15 +36,13 @@ class HttpClient(object):
|
|||||||
the request body. This will be encoded as JSON.
|
the request body. This will be encoded as JSON.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Deferred: Succeeds when we get *any* HTTP response.
|
Deferred: Succeeds when we get a 2xx HTTP response. The result
|
||||||
|
will be the decoded JSON body.
|
||||||
The result of the deferred is a tuple of `(code, response)`,
|
|
||||||
where `response` is a dict representing the decoded JSON body.
|
|
||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def get_json(self, url, args=None):
|
def get_json(self, url, args=None):
|
||||||
""" Get's some json from the given host homeserver and path
|
""" Gets some json from the given host homeserver and path
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
url (str): The URL to GET data from.
|
url (str): The URL to GET data from.
|
||||||
@@ -54,10 +52,8 @@ class HttpClient(object):
|
|||||||
and *not* a string.
|
and *not* a string.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Deferred: Succeeds when we get *any* HTTP response.
|
Deferred: Succeeds when we get a 2xx HTTP response. The result
|
||||||
|
will be the decoded JSON body.
|
||||||
The result of the deferred is a tuple of `(code, response)`,
|
|
||||||
where `response` is a dict representing the decoded JSON body.
|
|
||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
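For illustration only, the behaviour described in the docstrings above (succeed
only on a 2xx response, resolving to the decoded JSON body) could be sketched with
the third-party ``treq`` library. Using ``treq`` is an assumption for this example,
not necessarily how this module is implemented.

.. code:: python

    from twisted.internet import defer
    import treq

    @defer.inlineCallbacks
    def get_json(url, args=None):
        # Resolve to the decoded JSON body on a 2xx response, fail otherwise.
        response = yield treq.get(url, params=args)
        if not 200 <= response.code < 300:
            raise RuntimeError("request failed with HTTP %d" % response.code)
        body = yield response.json()
        defer.returnValue(body)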
|
|||||||
50
contrib/example_log_config.yaml
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
# Example log_config file for synapse. To enable, point `log_config` to it in
|
||||||
|
# `homeserver.yaml`, and restart synapse.
|
||||||
|
#
|
||||||
|
# This configuration will produce similar results to the defaults within
|
||||||
|
# synapse, but can be edited to give more flexibility.
|
||||||
|
|
||||||
|
version: 1
|
||||||
|
|
||||||
|
formatters:
|
||||||
|
fmt:
|
||||||
|
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
||||||
|
|
||||||
|
filters:
|
||||||
|
context:
|
||||||
|
(): synapse.util.logcontext.LoggingContextFilter
|
||||||
|
request: ""
|
||||||
|
|
||||||
|
handlers:
|
||||||
|
# example output to console
|
||||||
|
console:
|
||||||
|
class: logging.StreamHandler
|
||||||
|
filters: [context]
|
||||||
|
|
||||||
|
# example output to file - to enable, edit 'root' config below.
|
||||||
|
file:
|
||||||
|
class: logging.handlers.RotatingFileHandler
|
||||||
|
formatter: fmt
|
||||||
|
filename: /var/log/synapse/homeserver.log
|
||||||
|
maxBytes: 100000000
|
||||||
|
backupCount: 3
|
||||||
|
filters: [context]
|
||||||
|
|
||||||
|
|
||||||
|
root:
|
||||||
|
level: INFO
|
||||||
|
handlers: [console] # to use file handler instead, switch to [file]
|
||||||
|
|
||||||
|
loggers:
|
||||||
|
synapse:
|
||||||
|
level: INFO
|
||||||
|
|
||||||
|
synapse.storage.SQL:
|
||||||
|
# beware: increasing this to DEBUG will make synapse log sensitive
|
||||||
|
# information such as access tokens.
|
||||||
|
level: INFO
|
||||||
|
|
||||||
|
# example of enabling debugging for a component:
|
||||||
|
#
|
||||||
|
# synapse.federation.transport.server:
|
||||||
|
# level: DEBUG
|
||||||
20
contrib/prometheus/README
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
This directory contains some sample monitoring config for using the
|
||||||
|
'Prometheus' monitoring server against synapse.
|
||||||
|
|
||||||
|
To use it, first install prometheus by following the instructions at
|
||||||
|
|
||||||
|
http://prometheus.io/
|
||||||
|
|
||||||
|
Then add a new job to the main prometheus.conf file:
|
||||||
|
|
||||||
|
job: {
|
||||||
|
name: "synapse"
|
||||||
|
|
||||||
|
target_group: {
|
||||||
|
target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Metrics are disabled by default when running synapse; they must be enabled
|
||||||
|
with the 'enable-metrics' option, either in the synapse config file or as a
|
||||||
|
command-line option.
|
||||||
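Once metrics are enabled and synapse has been restarted, a quick way to confirm the
endpoint is exposing data is sketched below. The host and port are placeholders for
the target configured above, and the ``requests`` package is assumed to be installed.

.. code:: python

    import requests

    # Should print the first few exposed metric lines if the listener is up.
    resp = requests.get("http://localhost:9092/_synapse/metrics")
    for line in resp.text.splitlines()[:5]:
        print(line)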
395
contrib/prometheus/consoles/synapse.html
Normal file
@@ -0,0 +1,395 @@
|
|||||||
|
{{ template "head" . }}
|
||||||
|
|
||||||
|
{{ template "prom_content_head" . }}
|
||||||
|
<h1>System Resources</h1>
|
||||||
|
|
||||||
|
<h3>CPU</h3>
|
||||||
|
<div id="process_resource_utime"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#process_resource_utime"),
|
||||||
|
expr: "rate(process_cpu_seconds_total[2m]) * 100",
|
||||||
|
name: "[[job]]",
|
||||||
|
min: 0,
|
||||||
|
max: 100,
|
||||||
|
renderer: "line",
|
||||||
|
height: 150,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "%",
|
||||||
|
yTitle: "CPU Usage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Memory</h3>
|
||||||
|
<div id="process_resource_maxrss"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#process_resource_maxrss"),
|
||||||
|
expr: "process_psutil_rss:max",
|
||||||
|
name: "Maxrss",
|
||||||
|
min: 0,
|
||||||
|
renderer: "line",
|
||||||
|
height: 150,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yUnits: "bytes",
|
||||||
|
yTitle: "Usage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>File descriptors</h3>
|
||||||
|
<div id="process_fds"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#process_fds"),
|
||||||
|
expr: "process_open_fds{job='synapse'}",
|
||||||
|
name: "FDs",
|
||||||
|
min: 0,
|
||||||
|
renderer: "line",
|
||||||
|
height: 150,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "",
|
||||||
|
yTitle: "Descriptors"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h1>Reactor</h1>
|
||||||
|
|
||||||
|
<h3>Total reactor time</h3>
|
||||||
|
<div id="reactor_total_time"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#reactor_total_time"),
|
||||||
|
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
|
||||||
|
name: "time",
|
||||||
|
max: 1,
|
||||||
|
min: 0,
|
||||||
|
renderer: "area",
|
||||||
|
height: 150,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s/s",
|
||||||
|
yTitle: "Usage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Average reactor tick time</h3>
|
||||||
|
<div id="reactor_average_time"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#reactor_average_time"),
|
||||||
|
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
|
||||||
|
name: "time",
|
||||||
|
min: 0,
|
||||||
|
renderer: "line",
|
||||||
|
height: 150,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s",
|
||||||
|
yTitle: "Time"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Pending calls per tick</h3>
|
||||||
|
<div id="reactor_pending_calls"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#reactor_pending_calls"),
|
||||||
|
expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
|
||||||
|
name: "calls",
|
||||||
|
min: 0,
|
||||||
|
renderer: "line",
|
||||||
|
height: 150,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yTitle: "Pending Cals"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h1>Storage</h1>
|
||||||
|
|
||||||
|
<h3>Queries</h3>
|
||||||
|
<div id="synapse_storage_query_time"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_storage_query_time"),
|
||||||
|
expr: "rate(synapse_storage_query_time:count[2m])",
|
||||||
|
name: "[[verb]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yUnits: "queries/s",
|
||||||
|
yTitle: "Queries"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Transactions</h3>
|
||||||
|
<div id="synapse_storage_transaction_time"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_storage_transaction_time"),
|
||||||
|
expr: "rate(synapse_storage_transaction_time:count[2m])",
|
||||||
|
name: "[[desc]]",
|
||||||
|
min: 0,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yUnits: "txn/s",
|
||||||
|
yTitle: "Transactions"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Transaction execution time</h3>
|
||||||
|
<div id="synapse_storage_transactions_time_msec"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_storage_transactions_time_msec"),
|
||||||
|
expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
|
||||||
|
name: "[[desc]]",
|
||||||
|
min: 0,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s/s",
|
||||||
|
yTitle: "Usage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Database scheduling latency</h3>
|
||||||
|
<div id="synapse_storage_schedule_time"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_storage_schedule_time"),
|
||||||
|
expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
|
||||||
|
name: "Total latency",
|
||||||
|
min: 0,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s/s",
|
||||||
|
yTitle: "Usage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Cache hit ratio</h3>
|
||||||
|
<div id="synapse_cache_ratio"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_cache_ratio"),
|
||||||
|
expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
|
||||||
|
name: "[[name]]",
|
||||||
|
min: 0,
|
||||||
|
max: 100,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yUnits: "%",
|
||||||
|
yTitle: "Percentage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Cache size</h3>
|
||||||
|
<div id="synapse_cache_size"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_cache_size"),
|
||||||
|
expr: "synapse_util_caches_cache:size",
|
||||||
|
name: "[[name]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yUnits: "",
|
||||||
|
yTitle: "Items"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h1>Requests</h1>
|
||||||
|
|
||||||
|
<h3>Requests by Servlet</h3>
|
||||||
|
<div id="synapse_http_server_requests_servlet"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_requests_servlet"),
|
||||||
|
expr: "rate(synapse_http_server_requests:servlet[2m])",
|
||||||
|
name: "[[servlet]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "req/s",
|
||||||
|
yTitle: "Requests"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
|
||||||
|
<div id="synapse_http_server_requests_servlet_minus_events"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_requests_servlet_minus_events"),
|
||||||
|
expr: "rate(synapse_http_server_requests:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
|
||||||
|
name: "[[servlet]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "req/s",
|
||||||
|
yTitle: "Requests"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Average response times</h3>
|
||||||
|
<div id="synapse_http_server_response_time_avg"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_response_time_avg"),
|
||||||
|
expr: "rate(synapse_http_server_response_time:total[2m]) / rate(synapse_http_server_response_time:count[2m]) / 1000",
|
||||||
|
name: "[[servlet]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s/req",
|
||||||
|
yTitle: "Response time"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>All responses by code</h3>
|
||||||
|
<div id="synapse_http_server_responses"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_responses"),
|
||||||
|
expr: "rate(synapse_http_server_responses[2m])",
|
||||||
|
name: "[[method]] / [[code]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "req/s",
|
||||||
|
yTitle: "Requests"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Error responses by code</h3>
|
||||||
|
<div id="synapse_http_server_responses_err"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_responses_err"),
|
||||||
|
expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
|
||||||
|
name: "[[method]] / [[code]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "req/s",
|
||||||
|
yTitle: "Requests"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
|
||||||
|
<h3>CPU Usage</h3>
|
||||||
|
<div id="synapse_http_server_response_ru_utime"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_response_ru_utime"),
|
||||||
|
expr: "rate(synapse_http_server_response_ru_utime:total[2m])",
|
||||||
|
name: "[[servlet]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s/s",
|
||||||
|
yTitle: "CPU Usage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
|
||||||
|
<h3>DB Usage</h3>
|
||||||
|
<div id="synapse_http_server_response_db_txn_duration"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
|
||||||
|
expr: "rate(synapse_http_server_response_db_txn_duration:total[2m])",
|
||||||
|
name: "[[servlet]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s/s",
|
||||||
|
yTitle: "DB Usage"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
|
||||||
|
<h3>Average event send times</h3>
|
||||||
|
<div id="synapse_http_server_send_time_avg"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_http_server_send_time_avg"),
|
||||||
|
expr: "rate(synapse_http_server_response_time:total{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_time:count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
|
||||||
|
name: "[[servlet]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "s/req",
|
||||||
|
yTitle: "Response time"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h1>Federation</h1>
|
||||||
|
|
||||||
|
<h3>Sent Messages</h3>
|
||||||
|
<div id="synapse_federation_client_sent"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_federation_client_sent"),
|
||||||
|
expr: "rate(synapse_federation_client_sent[2m])",
|
||||||
|
name: "[[type]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "req/s",
|
||||||
|
yTitle: "Requests"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Received Messages</h3>
|
||||||
|
<div id="synapse_federation_server_received"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_federation_server_received"),
|
||||||
|
expr: "rate(synapse_federation_server_received[2m])",
|
||||||
|
name: "[[type]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "req/s",
|
||||||
|
yTitle: "Requests"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Pending</h3>
|
||||||
|
<div id="synapse_federation_transaction_queue_pending"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_federation_transaction_queue_pending"),
|
||||||
|
expr: "synapse_federation_transaction_queue_pending",
|
||||||
|
name: "[[type]]",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yUnits: "",
|
||||||
|
yTitle: "Units"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h1>Clients</h1>
|
||||||
|
|
||||||
|
<h3>Notifiers</h3>
|
||||||
|
<div id="synapse_notifier_listeners"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_notifier_listeners"),
|
||||||
|
expr: "synapse_notifier_listeners",
|
||||||
|
name: "listeners",
|
||||||
|
min: 0,
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||||
|
yUnits: "",
|
||||||
|
yTitle: "Listeners"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
<h3>Notified Events</h3>
|
||||||
|
<div id="synapse_notifier_notified_events"></div>
|
||||||
|
<script>
|
||||||
|
new PromConsole.Graph({
|
||||||
|
node: document.querySelector("#synapse_notifier_notified_events"),
|
||||||
|
expr: "rate(synapse_notifier_notified_events[2m])",
|
||||||
|
name: "events",
|
||||||
|
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||||
|
yUnits: "events/s",
|
||||||
|
yTitle: "Event rate"
|
||||||
|
})
|
||||||
|
</script>
|
||||||
|
|
||||||
|
{{ template "prom_content_tail" . }}
|
||||||
|
|
||||||
|
{{ template "tail" }}
|
||||||
21
contrib/prometheus/synapse.rules
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
|
||||||
|
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
|
||||||
|
|
||||||
|
synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
|
||||||
|
synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)
|
||||||
|
|
||||||
|
synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)
|
||||||
|
|
||||||
|
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
|
||||||
|
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
|
||||||
|
|
||||||
|
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
|
||||||
|
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
|
||||||
|
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
|
||||||
|
|
||||||
|
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
|
||||||
|
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
|
||||||
|
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
|
||||||
|
|
||||||
|
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
|
||||||
|
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
# This assumes that Synapse has been installed as a system package
|
# This assumes that Synapse has been installed as a system package
|
||||||
# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
|
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
|
||||||
# rather than in a user home directory or similar under virtualenv.
|
# rather than in a user home directory or similar under virtualenv.
|
||||||
|
|
||||||
[Unit]
|
[Unit]
|
||||||
@@ -9,9 +9,10 @@ Description=Synapse Matrix homeserver
|
|||||||
Type=simple
|
Type=simple
|
||||||
User=synapse
|
User=synapse
|
||||||
Group=synapse
|
Group=synapse
|
||||||
EnvironmentFile=-/etc/sysconfig/synapse
|
|
||||||
WorkingDirectory=/var/lib/synapse
|
WorkingDirectory=/var/lib/synapse
|
||||||
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml
|
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
|
||||||
|
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
|||||||
@@ -11,12 +11,12 @@ https://developers.google.com/recaptcha/
|
|||||||
Setting ReCaptcha Keys
|
Setting ReCaptcha Keys
|
||||||
----------------------
|
----------------------
|
||||||
The keys are a config option on the home server config. If they are not
|
The keys are a config option on the home server config. If they are not
|
||||||
visible, you can generate them via --generate-config. Set the following value:
|
visible, you can generate them via --generate-config. Set the following value::
|
||||||
|
|
||||||
recaptcha_public_key: YOUR_PUBLIC_KEY
|
recaptcha_public_key: YOUR_PUBLIC_KEY
|
||||||
recaptcha_private_key: YOUR_PRIVATE_KEY
|
recaptcha_private_key: YOUR_PRIVATE_KEY
|
||||||
|
|
||||||
In addition, you MUST enable captchas via:
|
In addition, you MUST enable captchas via::
|
||||||
|
|
||||||
enable_registration_captcha: true
|
enable_registration_captcha: true
|
||||||
|
|
||||||
@@ -25,7 +25,5 @@ Configuring IP used for auth
|
|||||||
The ReCaptcha API requires that the IP address of the user who solved the
|
The ReCaptcha API requires that the IP address of the user who solved the
|
||||||
captcha is sent. If the client is connecting through a proxy or load balancer,
|
captcha is sent. If the client is connecting through a proxy or load balancer,
|
||||||
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
|
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
|
||||||
IP address. This can be configured as an option on the home server like so:
|
IP address. This can be configured using the x_forwarded directive in the
|
||||||
|
listeners section of the homeserver.yaml configuration file.
|
||||||
captcha_ip_origin_is_x_forwarded: true
|
|
||||||
|
|
||||||
12
docs/admin_api/README.rst
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
Admin APIs
|
||||||
|
==========
|
||||||
|
|
||||||
|
This directory includes documentation for the various synapse specific admin
|
||||||
|
APIs available.
|
||||||
|
|
||||||
|
Only users that are server admins can use these APIs. A user can be marked as a
|
||||||
|
server admin by updating the database directly, e.g.:
|
||||||
|
|
||||||
|
``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'``
|
||||||
|
|
||||||
|
Restarting may be required for the changes to register.
|
||||||
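The update can also be applied from a small script; a sketch assuming the default
SQLite database file (adjust the path and user ID for your deployment):

.. code:: python

    import sqlite3

    conn = sqlite3.connect("homeserver.db")
    conn.execute("UPDATE users SET admin = 1 WHERE name = ?", ("@foo:bar.com",))
    conn.commit()
    conn.close()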
17
docs/admin_api/purge_history_api.rst
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
Purge History API
|
||||||
|
=================
|
||||||
|
|
||||||
|
The purge history API allows server admins to purge historic events from their
|
||||||
|
database, reclaiming disk space.
|
||||||
|
|
||||||
|
**NB!** This will not delete local events (locally sent message content etc.) from the database, but it will remove much of the metadata about them, which dramatically reduces on-disk space usage.
|
||||||
|
|
||||||
|
Depending on the amount of history being purged a call to the API may take
|
||||||
|
several minutes or longer. During this period users will not be able to
|
||||||
|
paginate further back in the room from the point being purged from.
|
||||||
|
|
||||||
|
The API is simply:
|
||||||
|
|
||||||
|
``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
|
||||||
|
|
||||||
|
including an ``access_token`` of a server admin.
|
||||||
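A hedged example of calling the endpoint with Python's ``requests`` package; the
room ID, event ID, host and access token are all placeholders.

.. code:: python

    import requests

    room_id = "!room:example.com"
    event_id = "$event:example.com"
    url = ("https://localhost:8448/_matrix/client/r0/admin/purge_history/%s/%s"
           % (room_id, event_id))
    # verify=False only if your homeserver uses a self-signed certificate
    resp = requests.post(url, params={"access_token": "<admin_token>"},
                         json={}, verify=False)
    print(resp.status_code, resp.text)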
17
docs/admin_api/purge_remote_media.rst
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
Purge Remote Media API
|
||||||
|
======================
|
||||||
|
|
||||||
|
The purge remote media API allows server admins to purge old cached remote
|
||||||
|
media.
|
||||||
|
|
||||||
|
The API is::
|
||||||
|
|
||||||
|
POST /_matrix/client/r0/admin/purge_media_cache?before_ts=<unix_timestamp_in_ms>&access_token=<access_token>
|
||||||
|
|
||||||
|
{}
|
||||||
|
|
||||||
|
Which will remove all cached media that was last accessed before
|
||||||
|
``<unix_timestamp_in_ms>``.
|
||||||
|
|
||||||
|
If the user re-requests purged remote media, synapse will re-request the media
|
||||||
|
from the originating server.
|
||||||
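For example, to purge everything last accessed more than a week ago (illustrative
sketch; the host and access token are placeholders and ``requests`` is assumed to
be installed):

.. code:: python

    import time
    import requests

    week_ms = 7 * 24 * 60 * 60 * 1000
    before_ts = int(time.time() * 1000) - week_ms  # one week ago, in ms
    resp = requests.post(
        "https://localhost:8448/_matrix/client/r0/admin/purge_media_cache",
        params={"before_ts": before_ts, "access_token": "<admin_token>"},
        json={},
        verify=False,
    )
    print(resp.json())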
73
docs/admin_api/user_admin_api.rst
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
Query Account
|
||||||
|
=============
|
||||||
|
|
||||||
|
This API returns information about a specific user account.
|
||||||
|
|
||||||
|
The api is::
|
||||||
|
|
||||||
|
GET /_matrix/client/r0/admin/whois/<user_id>
|
||||||
|
|
||||||
|
including an ``access_token`` of a server admin.
|
||||||
|
|
||||||
|
It returns a JSON body like the following:
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
{
|
||||||
|
"user_id": "<user_id>",
|
||||||
|
"devices": {
|
||||||
|
"": {
|
||||||
|
"sessions": [
|
||||||
|
{
|
||||||
|
"connections": [
|
||||||
|
{
|
||||||
|
"ip": "1.2.3.4",
|
||||||
|
"last_seen": 1417222374433,
|
||||||
|
"user_agent": "Mozilla/5.0 ..."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ip": "1.2.3.10",
|
||||||
|
"last_seen": 1417222374500,
|
||||||
|
"user_agent": "Dalvik/2.1.0 ..."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
``last_seen`` is measured in milliseconds since the Unix epoch.
|
||||||
|
|
||||||
|
Deactivate Account
|
||||||
|
==================
|
||||||
|
|
||||||
|
This API deactivates an account. It removes active access tokens, resets the
|
||||||
|
password, and deletes third-party IDs (to prevent the user requesting a
|
||||||
|
password reset).
|
||||||
|
|
||||||
|
The api is::
|
||||||
|
|
||||||
|
POST /_matrix/client/r0/admin/deactivate/<user_id>
|
||||||
|
|
||||||
|
including an ``access_token`` of a server admin, and an empty request body.
|
||||||
|
|
||||||
|
|
||||||
|
Reset password
|
||||||
|
==============
|
||||||
|
|
||||||
|
Changes the password of another user.
|
||||||
|
|
||||||
|
The api is::
|
||||||
|
|
||||||
|
POST /_matrix/client/r0/admin/reset_password/<user_id>
|
||||||
|
|
||||||
|
with a body of:
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
{
|
||||||
|
"new_password": "<secret>"
|
||||||
|
}
|
||||||
|
|
||||||
|
including an ``access_token`` of a server admin.
|
||||||
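Putting the three calls together in one illustrative script; the host, user ID,
password and access token are placeholders, and the ``requests`` package is assumed
to be installed.

.. code:: python

    import requests

    base = "https://localhost:8448/_matrix/client/r0/admin"
    params = {"access_token": "<admin_token>"}
    user_id = "@foo:bar.com"

    # Query account
    whois = requests.get("%s/whois/%s" % (base, user_id), params=params,
                         verify=False)
    print(whois.json())

    # Reset password
    requests.post("%s/reset_password/%s" % (base, user_id), params=params,
                  json={"new_password": "<secret>"}, verify=False)

    # Deactivate account (empty request body)
    requests.post("%s/deactivate/%s" % (base, user_id), params=params,
                  json={}, verify=False)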
@@ -1,10 +1,446 @@
|
|||||||
What do I do about "Unexpected logging context" debug log-lines everywhere?
|
Log contexts
|
||||||
|
============
|
||||||
|
|
||||||
<Mjark> The logging context lives in thread local storage
|
.. contents::
|
||||||
<Mjark> Sometimes it gets out of sync with what it should actually be, usually because something scheduled something to run on the reactor without preserving the logging context.
|
|
||||||
<Matthew> what is the impact of it getting out of sync? and how and when should we preserve log context?
|
|
||||||
<Mjark> The impact is that some of the CPU and database metrics will be under-reported, and some log lines will be mis-attributed.
|
|
||||||
<Mjark> It should happen auto-magically in all the APIs that do IO or otherwise defer to the reactor.
|
|
||||||
<Erik> Mjark: the other place is if we branch, e.g. using defer.gatherResults
|
|
||||||
|
|
||||||
Unanswered: how and when should we preserve log context?
|
To help track the processing of individual requests, synapse uses a
|
||||||
|
'log context' to track which request it is handling at any given moment. This
|
||||||
|
is done via a thread-local variable; a ``logging.Filter`` is then used to fish
|
||||||
|
the information back out of the thread-local variable and add it to each log
|
||||||
|
record.
|
||||||
|
|
||||||
|
Logcontexts are also used for CPU and database accounting, so that we can track
|
||||||
|
which requests were responsible for high CPU use or database activity.
|
||||||
|
|
||||||
|
The ``synapse.util.logcontext`` module provides facilities for managing the
|
||||||
|
current log context (as well as providing the ``LoggingContextFilter`` class).
|
||||||
|
|
||||||
|
Deferreds make the whole thing complicated, so this document describes how it
|
||||||
|
all works, and how to write code which follows the rules.
|
||||||
|
|
||||||
|
Logcontexts without Deferreds
|
||||||
|
-----------------------------
|
||||||
|
|
||||||
|
In the absence of any Deferred voodoo, things are simple enough. As with any
|
||||||
|
code of this nature, the rule is that our function should leave things as it
|
||||||
|
found them:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
from synapse.util import logcontext # omitted from future snippets
|
||||||
|
|
||||||
|
def handle_request(request_id):
|
||||||
|
request_context = logcontext.LoggingContext()
|
||||||
|
|
||||||
|
calling_context = logcontext.LoggingContext.current_context()
|
||||||
|
logcontext.LoggingContext.set_current_context(request_context)
|
||||||
|
try:
|
||||||
|
request_context.request = request_id
|
||||||
|
do_request_handling()
|
||||||
|
logger.debug("finished")
|
||||||
|
finally:
|
||||||
|
logcontext.LoggingContext.set_current_context(calling_context)
|
||||||
|
|
||||||
|
def do_request_handling():
|
||||||
|
logger.debug("phew") # this will be logged against request_id
|
||||||
|
|
||||||
|
|
||||||
|
LoggingContext implements the context management methods, so the above can be
|
||||||
|
written much more succinctly as:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def handle_request(request_id):
|
||||||
|
with logcontext.LoggingContext() as request_context:
|
||||||
|
request_context.request = request_id
|
||||||
|
do_request_handling()
|
||||||
|
logger.debug("finished")
|
||||||
|
|
||||||
|
def do_request_handling():
|
||||||
|
logger.debug("phew")
|
||||||
|
|
||||||
|
|
||||||
|
Using logcontexts with Deferreds
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
Deferreds — and in particular, ``defer.inlineCallbacks`` — break
|
||||||
|
the linear flow of code so that there is no longer a single entry point where
|
||||||
|
we should set the logcontext and a single exit point where we should remove it.
|
||||||
|
|
||||||
|
Consider the example above, where ``do_request_handling`` needs to do some
|
||||||
|
blocking operation, and returns a deferred:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def handle_request(request_id):
|
||||||
|
with logcontext.LoggingContext() as request_context:
|
||||||
|
request_context.request = request_id
|
||||||
|
yield do_request_handling()
|
||||||
|
logger.debug("finished")
|
||||||
|
|
||||||
|
|
||||||
|
In the above flow:
|
||||||
|
|
||||||
|
* The logcontext is set
|
||||||
|
* ``do_request_handling`` is called, and returns a deferred
|
||||||
|
* ``handle_request`` yields the deferred
|
||||||
|
* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred
|
||||||
|
|
||||||
|
So we have stopped processing the request (and will probably go on to start
|
||||||
|
processing the next), without clearing the logcontext.
|
||||||
|
|
||||||
|
To circumvent this problem, synapse code assumes that, wherever you have a
|
||||||
|
deferred, you will want to yield on it. To that end, wherever functions return
|
||||||
|
a deferred, we adopt the following conventions:
|
||||||
|
|
||||||
|
**Rules for functions returning deferreds:**
|
||||||
|
|
||||||
|
* If the deferred is already complete, the function returns with the same
|
||||||
|
logcontext it started with.
|
||||||
|
* If the deferred is incomplete, the function clears the logcontext before
|
||||||
|
returning; when the deferred completes, it restores the logcontext before
|
||||||
|
running any callbacks.
|
||||||
|
|
||||||
|
That sounds complicated, but actually it means a lot of code (including the
|
||||||
|
example above) "just works". There are two cases:
|
||||||
|
|
||||||
|
* If ``do_request_handling`` returns a completed deferred, then the logcontext
|
||||||
|
will still be in place. In this case, execution will continue immediately
|
||||||
|
after the ``yield``; the "finished" line will be logged against the right
|
||||||
|
context, and the ``with`` block restores the original context before we
|
||||||
|
return to the caller.
|
||||||
|
|
||||||
|
* If the returned deferred is incomplete, ``do_request_handling`` clears the
|
||||||
|
logcontext before returning. The logcontext is therefore clear when
|
||||||
|
``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
|
||||||
|
wrapper adds a callback to the deferred, and returns another (incomplete)
|
||||||
|
deferred to the caller, and it is safe to begin processing the next request.
|
||||||
|
|
||||||
|
Once ``do_request_handling``'s deferred completes, it will reinstate the
|
||||||
|
logcontext, before running the callback added by the ``inlineCallbacks``
|
||||||
|
wrapper. That callback runs the second half of ``handle_request``, so again
|
||||||
|
the "finished" line will be logged against the right
|
||||||
|
context, and the ``with`` block restores the original context.
|
||||||
|
|
||||||
|
As an aside, it's worth noting that ``handle_request`` follows our rules -
|
||||||
|
though that only matters if the caller has its own logcontext which it cares
|
||||||
|
about.
|
||||||
|
|
||||||
|
The following sections describe pitfalls and helpful patterns when implementing
|
||||||
|
these rules.
|
||||||
|
|
||||||
|
Always yield your deferreds
|
||||||
|
---------------------------
|
||||||
|
|
||||||
|
Whenever you get a deferred back from a function, you should ``yield`` on it
|
||||||
|
as soon as possible. (Returning it directly to your caller is ok too, if you're
|
||||||
|
not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
|
||||||
|
call any other functions.
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def fun():
|
||||||
|
logger.debug("starting")
|
||||||
|
yield do_some_stuff() # just like this
|
||||||
|
|
||||||
|
d = more_stuff()
|
||||||
|
result = yield d # also fine, of course
|
||||||
|
|
||||||
|
defer.returnValue(result)
|
||||||
|
|
||||||
|
def nonInlineCallbacksFun():
|
||||||
|
logger.debug("just a wrapper really")
|
||||||
|
return do_some_stuff() # this is ok too - the caller will yield on
|
||||||
|
# it anyway.
|
||||||
|
|
||||||
|
Provided this pattern is followed all the way back up the callchain to where
|
||||||
|
the logcontext was set, this will make things work out ok: provided
|
||||||
|
``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
|
||||||
|
``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.
|
||||||
|
|
||||||
|
It's all too easy to forget to ``yield``: for instance if we forgot that
|
||||||
|
``do_some_stuff`` returned a deferred, we might plough on regardless. This
|
||||||
|
leads to a mess; it will probably work itself out eventually, but not before
|
||||||
|
a load of stuff has been logged against the wrong context. (Normally, other
|
||||||
|
things will break, more obviously, if you forget to ``yield``, so this tends
|
||||||
|
not to be a major problem in practice.)
|
||||||
|
|
||||||
|
Of course sometimes you need to do something a bit fancier with your Deferreds
|
||||||
|
- not all code follows the linear A-then-B-then-C pattern. Notes on
|
||||||
|
implementing more complex patterns are in later sections.
|
||||||
|
|
||||||
|
Where you create a new Deferred, make it follow the rules
|
||||||
|
---------------------------------------------------------
|
||||||
|
|
||||||
|
Most of the time, a Deferred comes from another synapse function. Sometimes,
|
||||||
|
though, we need to make up a new Deferred, or we get a Deferred back from
|
||||||
|
external code. We need to make it follow our rules.
|
||||||
|
|
||||||
|
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
|
||||||
|
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
|
||||||
|
which returns a deferred which will run its callbacks after a given number of
|
||||||
|
seconds. That might look like:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
# not a logcontext-rules-compliant function
|
||||||
|
def get_sleep_deferred(seconds):
|
||||||
|
d = defer.Deferred()
|
||||||
|
reactor.callLater(seconds, d.callback, None)
|
||||||
|
return d
|
||||||
|
|
||||||
|
That doesn't follow the rules, but we can fix it by wrapping it with
|
||||||
|
``PreserveLoggingContext`` and ``yield`` ing on it:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def sleep(seconds):
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
yield get_sleep_deferred(seconds)
|
||||||
|
|
||||||
|
This technique works equally for external functions which return deferreds,
|
||||||
|
or deferreds we have made ourselves.
|
||||||
|
|
||||||
|
You can also use ``logcontext.make_deferred_yieldable``, which just does the
|
||||||
|
boilerplate for you, so the above could be written:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def sleep(seconds):
|
||||||
|
return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
|
||||||
|
|
||||||
|
|
||||||
|
Fire-and-forget
|
||||||
|
---------------
|
||||||
|
|
||||||
|
Sometimes you want to fire off a chain of execution, but not wait for its
|
||||||
|
result. That might look a bit like this:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def do_request_handling():
|
||||||
|
yield foreground_operation()
|
||||||
|
|
||||||
|
# *don't* do this
|
||||||
|
background_operation()
|
||||||
|
|
||||||
|
logger.debug("Request handling complete")
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def background_operation():
|
||||||
|
yield first_background_step()
|
||||||
|
logger.debug("Completed first step")
|
||||||
|
yield second_background_step()
|
||||||
|
logger.debug("Completed second step")
|
||||||
|
|
||||||
|
The above code does a couple of steps in the background after
|
||||||
|
``do_request_handling`` has finished. The log lines are still logged against
|
||||||
|
the ``request_context`` logcontext, which may or may not be desirable. There
|
||||||
|
are two big problems with the above, however. The first problem is that, if
|
||||||
|
``background_operation`` returns an incomplete Deferred, it will expect its
|
||||||
|
caller to ``yield`` immediately, so will have cleared the logcontext. In this
|
||||||
|
example, that means that 'Request handling complete' will be logged without any
|
||||||
|
context.
|
||||||
|
|
||||||
|
The second problem, which is potentially even worse, is that when the Deferred
|
||||||
|
returned by ``background_operation`` completes, it will restore the original
|
||||||
|
logcontext. There is nothing waiting on that Deferred, so the logcontext will
|
||||||
|
leak into the reactor and possibly get attached to some arbitrary future
|
||||||
|
operation.
|
||||||
|
|
||||||
|
There are two potential solutions to this.
|
||||||
|
|
||||||
|
One option is to surround the call to ``background_operation`` with a
|
||||||
|
``PreserveLoggingContext`` call. That will reset the logcontext before
|
||||||
|
starting ``background_operation`` (so the context restored when the deferred
|
||||||
|
completes will be the empty logcontext), and will restore the current
|
||||||
|
logcontext before continuing the foreground process:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def do_request_handling():
|
||||||
|
yield foreground_operation()
|
||||||
|
|
||||||
|
# start background_operation off in the empty logcontext, to
|
||||||
|
# avoid leaking the current context into the reactor.
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
background_operation()
|
||||||
|
|
||||||
|
# this will now be logged against the request context
|
||||||
|
logger.debug("Request handling complete")
|
||||||
|
|
||||||
|
Obviously that option means that the operations done in
|
||||||
|
``background_operation`` would not be logged against a logcontext (though
|
||||||
|
that might be fixed by setting a different logcontext via a ``with
|
||||||
|
LoggingContext(...)`` in ``background_operation``).
|
||||||
|
|
||||||
|
The second option is to use ``logcontext.preserve_fn``, which wraps a function
|
||||||
|
so that it doesn't reset the logcontext even when it returns an incomplete
|
||||||
|
deferred, and adds a callback to the returned deferred to reset the
|
||||||
|
logcontext. In other words, it turns a function that follows the Synapse rules
|
||||||
|
about logcontexts and Deferreds into one which behaves more like an external
|
||||||
|
function — the opposite operation to that described in the previous section.
|
||||||
|
It can be used like this:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def do_request_handling():
|
||||||
|
yield foreground_operation()
|
||||||
|
|
||||||
|
logcontext.preserve_fn(background_operation)()
|
||||||
|
|
||||||
|
# this will now be logged against the request context
|
||||||
|
logger.debug("Request handling complete")
|
||||||
|
|
||||||
|
XXX: I think ``preserve_context_over_fn`` is supposed to do the first option,
|
||||||
|
but the fact that it does ``preserve_context_over_deferred`` on its results
|
||||||
|
means that its use is fraught with difficulty.
|
||||||
|
|
||||||
|
Passing synapse deferreds into third-party functions
|
||||||
|
----------------------------------------------------
|
||||||
|
|
||||||
|
A typical example of this is where we want to collect together two or more
|
||||||
|
deferreds via ``defer.gatherResults``:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
d1 = operation1()
|
||||||
|
d2 = operation2()
|
||||||
|
d3 = defer.gatherResults([d1, d2])
|
||||||
|
|
||||||
|
This is really a variation of the fire-and-forget problem above, in that we are
|
||||||
|
firing off ``d1`` and ``d2`` without yielding on them. The difference
|
||||||
|
is that we now have third-party code attached to their callbacks. Either
|
||||||
|
technique given in the `Fire-and-forget`_ section will work.
|
||||||
|
|
||||||
|
Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
|
||||||
|
in order to make it follow the logcontext rules before we can yield it, as
|
||||||
|
described in `Where you create a new Deferred, make it follow the rules`_.
|
||||||
|
|
||||||
|
So, option one: reset the logcontext before starting the operations to be
|
||||||
|
gathered:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def do_request_handling():
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
d1 = operation1()
|
||||||
|
d2 = operation2()
|
||||||
|
result = yield defer.gatherResults([d1, d2])
|
||||||
|
|
||||||
|
In this case particularly, though, option two, of using
|
||||||
|
``logcontext.preserve_fn`` almost certainly makes more sense, so that
|
||||||
|
``operation1`` and ``operation2`` are both logged against the original
|
||||||
|
logcontext. This looks like:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def do_request_handling():
|
||||||
|
d1 = logcontext.preserve_fn(operation1)()
|
||||||
|
d2 = logcontext.preserve_fn(operation2)()
|
||||||
|
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
result = yield defer.gatherResults([d1, d2])
|
||||||
|
|
||||||
|
|
||||||
|
Was all this really necessary?
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
The conventions used work fine for a linear flow where everything happens in
|
||||||
|
series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
|
||||||
|
follow for any more exotic flows. It's hard not to wonder if we could have done
|
||||||
|
something else.
|
||||||
|
|
||||||
|
We're not going to rewrite Synapse now, so the following is entirely of
|
||||||
|
academic interest, but I'd like to record some thoughts on an alternative
|
||||||
|
approach.
|
||||||
|
|
||||||
|
I briefly prototyped some code following an alternative set of rules. I think
|
||||||
|
it would work, but I certainly didn't get as far as thinking how it would
|
||||||
|
interact with concepts as complicated as the cache descriptors.
|
||||||
|
|
||||||
|
My alternative rules were:
|
||||||
|
|
||||||
|
* functions always preserve the logcontext of their caller, whether or not they
|
||||||
|
are returning a Deferred.
|
||||||
|
|
||||||
|
* Deferreds returned by synapse functions run their callbacks in the same
|
||||||
|
context as the function was originally called in.
|
||||||
|
|
||||||
|
The main point of this scheme is that everywhere that sets the logcontext is
|
||||||
|
responsible for clearing it before returning control to the reactor.
|
||||||
|
|
||||||
|
So, for example, if you were the function which started a ``with
|
||||||
|
LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
|
||||||
|
off the background process, and then leave the ``with`` block to wait for it:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def handle_request(request_id):
|
||||||
|
with logcontext.LoggingContext() as request_context:
|
||||||
|
request_context.request = request_id
|
||||||
|
d = do_request_handling()
|
||||||
|
|
||||||
|
def cb(r):
|
||||||
|
logger.debug("finished")
|
||||||
|
|
||||||
|
d.addCallback(cb)
|
||||||
|
return d
|
||||||
|
|
||||||
|
(in general, mixing ``with LoggingContext`` blocks and
|
||||||
|
``defer.inlineCallbacks`` in the same function leads to slightly
|
||||||
|
counter-intuitive code, under this scheme).
|
||||||
|
|
||||||
|
Because we leave the original ``with`` block as soon as the Deferred is
|
||||||
|
returned (as opposed to waiting for it to be resolved, as we do today), the
|
||||||
|
logcontext is cleared before control passes back to the reactor; so if there is
|
||||||
|
some code within ``do_request_handling`` which needs to wait for a Deferred to
|
||||||
|
complete, there is no need for it to worry about clearing the logcontext before
|
||||||
|
doing so:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def handle_request():
|
||||||
|
r = do_some_stuff()
|
||||||
|
r.addCallback(do_some_more_stuff)
|
||||||
|
return r
|
||||||
|
|
||||||
|
— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
|
||||||
|
runs its callbacks in the original logcontext, all is happy.
|
||||||
|
|
||||||
|
The business of a Deferred which runs its callbacks in the original logcontext
|
||||||
|
isn't hard to achieve — we have it today, in the shape of
|
||||||
|
``logcontext._PreservingContextDeferred``:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
def do_some_stuff():
|
||||||
|
deferred = do_some_io()
|
||||||
|
pcd = _PreservingContextDeferred(LoggingContext.current_context())
|
||||||
|
deferred.chainDeferred(pcd)
|
||||||
|
return pcd
|
||||||
|
|
||||||
|
It turns out that, thanks to the way that Deferreds chain together, we
|
||||||
|
automatically get the property of a context-preserving deferred with
|
||||||
|
``defer.inlineCallbacks``, provided the final Deferred the function ``yields``
|
||||||
|
on has that property. So we can just write:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def handle_request():
|
||||||
|
yield do_some_stuff()
|
||||||
|
yield do_some_more_stuff()
|
||||||
|
|
||||||
|
To conclude: I think this scheme would have worked equally well, with less
|
||||||
|
danger of messing it up, and probably made some more esoteric code easier to
|
||||||
|
write. But again — changing the conventions of the entire Synapse codebase is
|
||||||
|
not a sensible option for the marginal improvement offered.
|
||||||
|
|||||||
@@ -1,50 +1,68 @@
|
|||||||
How to monitor Synapse metrics using Prometheus
|
How to monitor Synapse metrics using Prometheus
|
||||||
===============================================
|
===============================================
|
||||||
|
|
||||||
1: Install prometheus:
|
1. Install prometheus:
|
||||||
|
|
||||||
Follow instructions at http://prometheus.io/docs/introduction/install/
|
Follow instructions at http://prometheus.io/docs/introduction/install/
|
||||||
|
|
||||||
2: Enable synapse metrics:
|
2. Enable synapse metrics:
|
||||||
|
|
||||||
Simply setting a (local) port number will enable it. Pick a port.
|
Simply setting a (local) port number will enable it. Pick a port.
|
||||||
prometheus itself defaults to 9090, so starting just above that for
|
prometheus itself defaults to 9090, so starting just above that for
|
||||||
locally monitored services seems reasonable. E.g. 9092:
|
locally monitored services seems reasonable. E.g. 9092:
|
||||||
|
|
||||||
Add to homeserver.yaml
|
Add to homeserver.yaml::
|
||||||
|
|
||||||
metrics_port: 9092
|
metrics_port: 9092
|
||||||
|
|
||||||
Restart synapse
|
Also ensure that ``enable_metrics`` is set to ``True``.
|
||||||
|
|
||||||
3: Check out synapse-prometheus-config
|
Restart synapse.
|
||||||
https://github.com/matrix-org/synapse-prometheus-config
|
|
||||||
|
|
||||||
4: Add ``synapse.html`` and ``synapse.rules``
|
3. Add a prometheus target for synapse.
|
||||||
The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
|
|
||||||
and the ``.rules`` file needs to be invoked somewhere in the main config
|
|
||||||
file. A symlink to each from the git checkout into the prometheus directory
|
|
||||||
might be easiest to ensure ``git pull`` keeps it updated.
|
|
||||||
|
|
||||||
5: Add a prometheus target for synapse
|
It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::
|
||||||
This is easiest if prometheus runs on the same machine as synapse, as it can
|
|
||||||
then just use localhost::
|
|
||||||
|
|
||||||
global: {
|
- job_name: "synapse"
|
||||||
rule_file: "synapse.rules"
|
metrics_path: "/_synapse/metrics"
|
||||||
}
|
static_configs:
|
||||||
|
- targets: ["my.server.here:9092"]
|
||||||
|
|
||||||
job: {
|
If your prometheus is older than 1.5.2, you will need to replace
|
||||||
name: "synapse"
|
``static_configs`` in the above with ``target_groups``.
|
||||||
|
|
||||||
target_group: {
|
Restart prometheus.
|
||||||
target: "http://localhost:9092/"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
6: Start prometheus::
|
Standard Metric Names
|
||||||
|
---------------------
|
||||||
|
|
||||||
./prometheus -config.file=prometheus.conf
|
As of synapse version 0.18.2, the format of the process-wide metrics has been
|
||||||
|
changed to fit prometheus standard naming conventions. Additionally the units
|
||||||
|
have been changed to seconds, from milliseconds.
|
||||||
|
|
||||||
7: Wait a few seconds for it to start and perform the first scrape,
|
================================== =============================
|
||||||
then visit the console:
|
New name Old name
|
||||||
|
---------------------------------- -----------------------------
|
||||||
|
process_cpu_user_seconds_total process_resource_utime / 1000
|
||||||
|
process_cpu_system_seconds_total process_resource_stime / 1000
|
||||||
|
process_open_fds (no 'type' label) process_fds
|
||||||
|
================================== =============================
|
||||||
|
|
||||||
http://server-where-prometheus-runs:9090/consoles/synapse.html
|
The python-specific counts of garbage collector performance have been renamed.
|
||||||
|
|
||||||
|
=========================== ======================
|
||||||
|
New name Old name
|
||||||
|
--------------------------- ----------------------
|
||||||
|
python_gc_time reactor_gc_time
|
||||||
|
python_gc_unreachable_total reactor_gc_unreachable
|
||||||
|
python_gc_counts reactor_gc_counts
|
||||||
|
=========================== ======================
|
||||||
|
|
||||||
|
The twisted-specific reactor metrics have been renamed.
|
||||||
|
|
||||||
|
==================================== =====================
|
||||||
|
New name Old name
|
||||||
|
------------------------------------ ---------------------
|
||||||
|
python_twisted_reactor_pending_calls reactor_pending_calls
|
||||||
|
python_twisted_reactor_tick_time reactor_tick_time
|
||||||
|
==================================== =====================
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
Using Postgres
|
Using Postgres
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
|
Postgres version 9.4 or later is known to work.
|
||||||
|
|
||||||
Set up database
|
Set up database
|
||||||
===============
|
===============
|
||||||
|
|
||||||
@@ -112,9 +114,9 @@ script one last time, e.g. if the SQLite database is at ``homeserver.db``
 run::
 
     synapse_port_db --sqlite-database homeserver.db \
-        --postgres-config database_config.yaml
+        --postgres-config homeserver-postgres.yaml
 
 Once that has completed, change the synapse config to point at the PostgreSQL
-database configuration file using the ``database_config`` parameter (see
-`Synapse Config`_) and restart synapse. Synapse should now be running against
+database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
+``homeserver.yaml``) and restart synapse. Synapse should now be running against
 PostgreSQL.
@@ -26,28 +26,10 @@ expose the append-only log to the readers should be fairly minimal.
 Architecture
 ------------
 
-The Replication API
-~~~~~~~~~~~~~~~~~~~
+The Replication Protocol
+~~~~~~~~~~~~~~~~~~~~~~~~
 
-Synapse will optionally expose a long poll HTTP API for extracting updates. The
-API will have a similar shape to /sync in that clients provide tokens
-indicating where in the log they have reached and a timeout. The synapse server
-then either responds with updates immediately if it already has updates or it
-waits until the timeout for more updates. If the timeout expires and nothing
-happened then the server returns an empty response.
-
-However unlike the /sync API this replication API is returning synapse specific
-data rather than trying to implement a matrix specification. The replication
-results are returned as arrays of rows where the rows are mostly lifted
-directly from the database. This avoids unnecessary JSON parsing on the server
-and hopefully avoids an impedance mismatch between the data returned and the
-required updates to the datastore.
-
-This does not replicate all the database tables as many of the database tables
-are indexes that can be recovered from the contents of other tables.
-
-The format and parameters for the api are documented in
-``synapse/replication/resource.py``.
+See ``tcp_replication.rst``
 
 
 The Slaved DataStore
@@ -50,7 +50,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'Synapse'
-copyright = u'2014, TNG'
+copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
223 docs/tcp_replication.rst (Normal file)
@@ -0,0 +1,223 @@
TCP Replication
===============

Motivation
----------

Previously the workers used an HTTP long poll mechanism to get updates from the
master, which had the problem of causing a lot of duplicate work on the server.
This TCP protocol replaces those APIs with the aim of increased efficiency.


Overview
--------

The protocol is based on fire and forget, line based commands. An example flow
would be (where '>' indicates master to worker and '<' worker to master flows)::

    > SERVER example.com
    < REPLICATE events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the ``SERVER`` command, followed by the client asking to subscribe to the
``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
commands which have the format ``RDATA <stream_name> <token> <row>``, where the
format of ``<row>`` is defined by the individual streams.

Error reporting happens by either the client or server sending an `ERROR`
command, and usually the connection will be closed.

Since the protocol is a simple line-based one, it's possible to manually connect to
the server using a tool like netcat. A few things should be noted when manually
using the protocol:

* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
  be used to get all future updates. The special stream name ``ALL`` can be used
  with ``NOW`` to subscribe to all available streams.
* The federation stream is only available if federation sending has been
  disabled on the main process.
* The server will only time connections out that have sent a ``PING`` command.
  If a ping is sent then the connection will be closed if no further commands
  are received within 15s. Both the client and server protocol implementations
  will send an initial PING on connection and ensure at least one command every
  5s is sent (not necessarily ``PING``).
* ``RDATA`` commands *usually* include a numeric token, however if the stream
  has multiple rows to replicate per token the server will send multiple
  ``RDATA`` commands, with all but the last having a token of ``batch``. See
  the documentation on ``commands.RdataCommand`` for further details.


Architecture
------------

The basic structure of the protocol is line based, where the initial word of
each line specifies the command. The rest of the line is parsed based on the
command. For example, the `RDATA` command is defined as::

    RDATA <stream_name> <token> <row_json>

(Note that `<row_json>` may contain spaces, but cannot contain newlines.)

Blank lines are ignored.


Keep alives
~~~~~~~~~~~

Both sides are expected to send at least one command every 5s or so, and
should send a ``PING`` command if necessary. If either side does not receive a
command within e.g. 15s then the connection should be closed.

Because the server may be connected to manually using e.g. netcat, the timeouts
aren't enabled until an initial ``PING`` command is seen. Both the client and
server implementations below send a ``PING`` command immediately on connection to
ensure the timeouts are enabled.

This ensures that both sides can quickly realize if the tcp connection has gone
and handle the situation appropriately.


Start up
~~~~~~~~

When a new connection is made, the server:

* Sends a ``SERVER`` command, which includes the identity of the server, allowing
  the client to detect if it's connected to the expected server
* Sends a ``PING`` command as above, to enable the client to time out connections
  promptly.

The client:

* Sends a ``NAME`` command, allowing the server to associate a human friendly
  name with the connection. This is optional.
* Sends a ``PING`` as above
* For each stream the client wishes to subscribe to, it sends a ``REPLICATE``
  with the stream_name and token it wants to subscribe from.
* On receipt of a ``SERVER`` command, checks that the server name matches the
  expected server name.


Error handling
~~~~~~~~~~~~~~

If either side detects an error it can send an ``ERROR`` command and close the
connection.

If the client side loses the connection to the server it should reconnect,
following the steps above.


Congestion
~~~~~~~~~~

If the server sends messages faster than the client can consume them the server
will first buffer a (fairly large) number of commands and then disconnect the
client. This ensures that we don't queue up an unbounded number of commands in
memory and gives us a potential opportunity to squawk loudly. When/if the client
recovers it can reconnect to the server and ask for missed messages.


Reliability
~~~~~~~~~~~

In general the replication stream should be considered an unreliable transport
since e.g. commands are not resent if the connection disappears.

The exception to that are the replication streams, i.e. RDATA commands, since
these include tokens which can be used to restart the stream on connection
errors.

The client should keep track of the token in the last RDATA command received
for each stream so that on reconnection it can start streaming from the correct
place. Note: not all RDATA have valid tokens due to batching. See
``RdataCommand`` for more details.


Example
~~~~~~~

An example interaction is shown below. Each line is prefixed with '>' or '<' to
indicate which side is sending, these are *not* included on the wire::

    * connection established *
    > SERVER localhost:8823
    > PING 1490197665618
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE events 1
    < REPLICATE backfill 1
    < REPLICATE caches 1
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
    * connection closed by server *

The ``POSITION`` command sent by the server is used to set the client's position
without needing to send data with the ``RDATA`` command.


An example of a batched set of ``RDATA`` is::

    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it sees the
last ``RDATA``.


List of commands
~~~~~~~~~~~~~~~~

The list of valid commands, with which side can send it: server (S) or client (C):

SERVER (S)
    Sent at the start to identify which server the client is talking to

RDATA (S)
    A single update in a stream

POSITION (S)
    The position of the stream has been updated

ERROR (S, C)
    There was an error

PING (S, C)
    Sent periodically to ensure the connection is still alive

NAME (C)
    Sent at the start by client to inform the server who they are

REPLICATE (C)
    Asks the server to replicate a given stream

USER_SYNC (C)
    A user has started or stopped syncing

FEDERATION_ACK (C)
    Acknowledge receipt of some federation data

REMOVE_PUSHER (C)
    Inform the server a pusher should be removed

INVALIDATE_CACHE (C)
    Inform the server a cache should be invalidated

SYNC (S, C)
    Used exclusively in tests


See ``synapse/replication/tcp/commands.py`` for a detailed description and the
format of each command.
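The line-based framing documented above is simple enough to parse by hand. The following is a rough, illustrative sketch (not taken from the synapse source) of how an incoming line might be split into the commands listed in this file; the real implementations live under ``synapse/replication/tcp/``::

    # Illustrative sketch only: parse replication-protocol lines of the form
    # "<COMMAND> <rest of line>", where RDATA's payload is
    # "<stream_name> <token> <row_json>" and <row_json> may contain spaces.
    import json

    def parse_line(line):
        """Split one protocol line into (command, parsed arguments)."""
        line = line.rstrip("\n")
        if not line:
            return None  # blank lines are ignored
        command, _, rest = line.partition(" ")
        if command == "RDATA":
            stream_name, token, row_json = rest.split(" ", 2)
            # a token of "batch" means more rows for the same token follow
            token = None if token == "batch" else int(token)
            return command, (stream_name, token, json.loads(row_json))
        if command == "POSITION":
            stream_name, token = rest.split(" ", 1)
            return command, (stream_name, int(token))
        # SERVER, PING, NAME, ERROR, REPLICATE, ... carry their argument verbatim
        return command, rest

    print(parse_line('RDATA caches 54 ["get_user_by_id",["@test:localhost:8823"],1490197670513]'))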
@@ -50,14 +50,37 @@ You may be able to setup coturn via your package manager, or set it up manually
 
     pwgen -s 64 1
 
- 5. Ensure youe firewall allows traffic into the TURN server on
-    the ports you've configured it to listen on (remember to allow
-    both TCP and UDP if you've enabled both).
+ 5. Consider your security settings. TURN lets users request a relay
+    which will connect to arbitrary IP addresses and ports. At the least
+    we recommend:
 
- 6. If you've configured coturn to support TLS/DTLS, generate or
+       # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
+       no-tcp-relay
+
+       # don't let the relay ever try to connect to private IP address ranges within your network (if any)
+       # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
+       denied-peer-ip=10.0.0.0-10.255.255.255
+       denied-peer-ip=192.168.0.0-192.168.255.255
+       denied-peer-ip=172.16.0.0-172.31.255.255
+
+       # special case the turn server itself so that client->TURN->TURN->client flows work
+       allowed-peer-ip=10.0.0.1
+
+       # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
+       user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
+       total-quota=1200
+
+    Ideally coturn should refuse to relay traffic which isn't SRTP;
+    see https://github.com/matrix-org/synapse/issues/2009
+
+ 6. Ensure your firewall allows traffic into the TURN server on
+    the ports you've configured it to listen on (remember to allow
+    both TCP and UDP TURN traffic)
+
+ 7. If you've configured coturn to support TLS/DTLS, generate or
     import your private key and certificate.
 
- 7. Start the turn server::
+ 8. Start the turn server::
 
     bin/turnserver -o
@@ -83,12 +106,19 @@ Your home server configuration file needs the following extra keys:
    to refresh credentials. The TURN REST API specification recommends
    one day (86400000).
 
+ 4. "turn_allow_guests": Whether to allow guest users to use the TURN
+    server. This is enabled by default, as otherwise VoIP will not
+    work reliably for guests. However, it does introduce a security risk
+    as it lets guests connect to arbitrary endpoints without having gone
+    through a CAPTCHA or similar to register a real account.
+
 As an example, here is the relevant section of the config file for
 matrix.org::
 
     turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
     turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
     turn_user_lifetime: 86400000
+    turn_allow_guests: True
 
 Now, restart synapse::
 
94 docs/workers.rst (Normal file)
@@ -0,0 +1,94 @@
Scaling synapse via workers
---------------------------

Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
processes are called 'workers', and are (eventually) intended to scale
horizontally independently.

All processes continue to share the same database instance, and as such, workers
only work with postgres based synapse deployments (sharing a single sqlite
across multiple processes is a recipe for disaster, plus you should be using
postgres anyway if you care about scalability).

The workers communicate with the master synapse process via a synapse-specific
TCP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.

To enable workers, you need to add a replication listener to the master synapse, e.g.::

    listeners:
      - port: 9092
        bind_address: '127.0.0.1'
        type: replication

Under **no circumstances** should this replication API listener be exposed to the
public internet; it currently implements no authentication whatsoever and is
unencrypted.

You then create a set of configs for the various worker processes. These
worker configuration files should be stored in a dedicated subdirectory, to allow
synctl to manipulate them.

The current available worker applications are:
 * synapse.app.pusher - handles sending push notifications to sygnal and email
 * synapse.app.synchrotron - handles /sync endpoints. can scale horizontally through multiple instances.
 * synapse.app.appservice - handles output traffic to Application Services
 * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
 * synapse.app.media_repository - handles the media repository.
 * synapse.app.client_reader - handles client API endpoints like /publicRooms

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.

You must specify the type of worker application (worker_app) and the replication
endpoint that it's talking to on the main synapse process (worker_replication_host
and worker_replication_port).

For instance::

    worker_app: synapse.app.synchrotron

    # The replication listener on the synapse to talk to.
    worker_replication_host: 127.0.0.1
    worker_replication_port: 9092

    worker_listeners:
     - type: http
       port: 8083
       resources:
         - names:
           - client

    worker_daemonize: True
    worker_pid_file: /home/matrix/synapse/synchrotron.pid
    worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml

...is a full configuration for a synchrotron worker instance, which will expose a
plain HTTP /sync endpoint on port 8083 separately from the /sync endpoint provided
by the main synapse.

Obviously you should configure your loadbalancer to route the /sync endpoint to
the synchrotron instance(s) in this case.

Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
in the given directory, e.g.::

    synctl -a $CONFIG/workers start

Currently one should always restart all workers when restarting or upgrading
synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.

To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

All of the above is highly experimental and subject to change as Synapse evolves,
but is documented here to help folks needing highly scalable Synapses similar
to the one running matrix.org!
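As a rough illustration of the keys described above, a small script along these lines could sanity-check a worker config before handing it to synctl. It is a sketch only, assuming PyYAML is installed and that the file uses the keys shown in the example above; it is not part of synapse::

    # Sketch: check that a worker config defines the keys workers.rst says are
    # required (worker_app plus the replication endpoint), and warn if the
    # replication host is not loopback, since that protocol is unauthenticated
    # and unencrypted.
    import sys
    import yaml

    REQUIRED = ("worker_app", "worker_replication_host", "worker_replication_port")

    def check_worker_config(path):
        with open(path) as f:
            config = yaml.safe_load(f) or {}

        missing = [key for key in REQUIRED if key not in config]
        if missing:
            print("%s: missing keys: %s" % (path, ", ".join(missing)))
            return False

        host = str(config["worker_replication_host"])
        if host not in ("127.0.0.1", "localhost", "::1"):
            print("%s: warning: replication host %r is not loopback" % (path, host))
        return True

    if __name__ == "__main__":
        ok = all(check_worker_config(p) for p in sys.argv[1:])
        sys.exit(0 if ok else 1)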
23 jenkins-dendron-haproxy-postgres.sh (Executable file)
@@ -0,0 +1,23 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export WORKSPACE
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy

./jenkins/prepare_synapse.sh
./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
./dendron/jenkins/build_dendron.sh
./sytest/jenkins/prep_sytest_for_postgres.sh

./sytest/jenkins/install_and_run.sh \
    --python $WORKSPACE/.tox/py27/bin/python \
    --synapse-directory $WORKSPACE \
    --dendron $WORKSPACE/dendron/bin/dendron \
    --haproxy \
@@ -4,84 +4,17 @@ set -eux
 
 : ${WORKSPACE:="$(pwd)"}
 
+export WORKSPACE
 export PYTHONDONTWRITEBYTECODE=yep
 export SYNAPSE_CACHE_FACTOR=1
 
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install psycopg2
-$TOX_BIN/pip install lxml
-
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-
-if [[ ! -e .dendron-base ]]; then
-  git clone https://github.com/matrix-org/dendron.git .dendron-base --mirror
-else
-  (cd .dendron-base; git fetch -p)
-fi
-
-rm -rf dendron
-git clone .dendron-base dendron --shared
-cd dendron
-
-: ${GOPATH:=${WORKSPACE}/.gopath}
-if [[ "${GOPATH}" != *:* ]]; then
-  mkdir -p "${GOPATH}"
-  export PATH="${GOPATH}/bin:${PATH}"
-fi
-export GOPATH
-
-git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
-
-go get github.com/constabulary/gb/...
-gb generate
-gb build
-
-cd ..
-
-
-if [[ ! -e .sytest-base ]]; then
-  git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
-else
-  (cd .sytest-base; git fetch -p)
-fi
-
-rm -rf sytest
-git clone .sytest-base sytest --shared
-cd sytest
-
-git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
-
-: ${PORT_BASE:=8000}
-: ${PORT_COUNT=20}
-
-./jenkins/prep_sytest_for_postgres.sh
-
-mkdir -p var
-
-echo >&2 "Running sytest with PostgreSQL";
-./jenkins/install_and_run.sh --python $TOX_BIN/python \
+./jenkins/prepare_synapse.sh
+./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
+./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
+./dendron/jenkins/build_dendron.sh
+./sytest/jenkins/prep_sytest_for_postgres.sh
+
+./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
-    --pusher \
-    --synchrotron \
-    --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1))
-
-cd ..
@@ -4,61 +4,15 @@ set -eux
 
 : ${WORKSPACE:="$(pwd)"}
 
+export WORKSPACE
 export PYTHONDONTWRITEBYTECODE=yep
 export SYNAPSE_CACHE_FACTOR=1
 
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install psycopg2
-$TOX_BIN/pip install lxml
-
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-
-if [[ ! -e .sytest-base ]]; then
-  git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
-else
-  (cd .sytest-base; git fetch -p)
-fi
-
-rm -rf sytest
-git clone .sytest-base sytest --shared
-cd sytest
-
-git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
-
-: ${PORT_BASE:=8000}
-: ${PORT_COUNT=20}
-
-./jenkins/prep_sytest_for_postgres.sh
-
-echo >&2 "Running sytest with PostgreSQL";
-./jenkins/install_and_run.sh --coverage \
-    --python $TOX_BIN/python \
+./jenkins/prepare_synapse.sh
+./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
+
+./sytest/jenkins/prep_sytest_for_postgres.sh
+
+./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \
-    --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \
-
-cd ..
-cp sytest/.coverage.* .
-
-# Combine the coverage reports
-echo "Combining:" .coverage.*
-$TOX_BIN/python -m coverage combine
-# Output coverage to coverage.xml
-$TOX_BIN/coverage xml -o coverage.xml
@@ -4,55 +4,13 @@ set -eux
 
 : ${WORKSPACE:="$(pwd)"}
 
+export WORKSPACE
 export PYTHONDONTWRITEBYTECODE=yep
 export SYNAPSE_CACHE_FACTOR=1
 
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install lxml
-
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-
-if [[ ! -e .sytest-base ]]; then
-  git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
-else
-  (cd .sytest-base; git fetch -p)
-fi
-
-rm -rf sytest
-git clone .sytest-base sytest --shared
-cd sytest
-
-git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
-
-: ${PORT_COUNT=20}
-: ${PORT_BASE:=8000}
-./jenkins/install_and_run.sh --coverage \
-    --python $TOX_BIN/python \
+./jenkins/prepare_synapse.sh
+./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
+
+./sytest/jenkins/install_and_run.sh \
+    --python $WORKSPACE/.tox/py27/bin/python \
     --synapse-directory $WORKSPACE \
-    --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \
-
-cd ..
-cp sytest/.coverage.* .
-
-# Combine the coverage reports
-echo "Combining:" .coverage.*
-$TOX_BIN/python -m coverage combine
-# Output coverage to coverage.xml
-$TOX_BIN/coverage xml -o coverage.xml
@@ -22,4 +22,9 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w
 
 rm .coverage* || echo "No coverage files to remove"
 
+tox --notest -e py27
+TOX_BIN=$WORKSPACE/.tox/py27/bin
+python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
+$TOX_BIN/pip install lxml
+
 tox -e py27
44 jenkins/clone.sh (Executable file)
@@ -0,0 +1,44 @@
#! /bin/bash

# This clones a project from github into a named subdirectory
# If the project has a branch with the same name as this branch
# then it will checkout that branch after cloning.
# Otherwise it will checkout "origin/develop."
# The first argument is the name of the directory to checkout
# the branch into.
# The second argument is the URL of the remote repository to checkout.
# Usually something like https://github.com/matrix-org/sytest.git

set -eux

NAME=$1
PROJECT=$2
BASE=".$NAME-base"

# Update our mirror.
if [ ! -d ".$NAME-base" ]; then
  # Create a local mirror of the source repository.
  # This saves us from having to download the entire repository
  # when this script is next run.
  git clone "$PROJECT" "$BASE" --mirror
else
  # Fetch any updates from the source repository.
  (cd "$BASE"; git fetch -p)
fi

# Remove the existing repository so that we have a clean copy
rm -rf "$NAME"
# Cloning with --shared means that we will share portions of the
# .git directory with our local mirror.
git clone "$BASE" "$NAME" --shared

# Jenkins may have supplied us with the name of the branch in the
# environment. Otherwise we will have to guess based on the current
# commit.
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
cd "$NAME"
# check out the relevant branch
git checkout "${GIT_BRANCH}" || (
  echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
  git checkout "origin/develop"
)
20 jenkins/prepare_synapse.sh (Executable file)
@@ -0,0 +1,20 @@
#! /bin/bash

cd "`dirname $0`/.."

TOX_DIR=$WORKSPACE/.tox

mkdir -p $TOX_DIR

if ! [ $TOX_DIR -ef .tox ]; then
  ln -s "$TOX_DIR" .tox
fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin
$TOX_BIN/pip install setuptools
{ python synapse/python_dependencies.py
  echo lxml psycopg2
} | xargs $TOX_BIN/pip install
@@ -18,7 +18,9 @@
             <div class="summarytext">{{ summary_text }}</div>
         </td>
         <td class="logo">
-            {% if app_name == "Vector" %}
+            {% if app_name == "Riot" %}
+                <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+            {% elif app_name == "Vector" %}
                 <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
             {% else %}
                 <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
90 scripts-dev/federation_client.py (Normal file → Executable file)
@@ -1,10 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
 import nacl.signing
 import json
 import base64
 import requests
 import sys
 import srvlookup
+import yaml
 
 def encode_base64(input_bytes):
     """Encode bytes as a base64 string without any padding."""
@@ -116,31 +136,85 @@ def get_json(origin_name, origin_key, destination, path):
     authorization_headers = []
 
     for key, sig in signed_json["signatures"][origin_name].items():
-        authorization_headers.append(bytes(
-            "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
-                origin_name, key, sig,
-            )
-        ))
+        header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
+            origin_name, key, sig,
+        )
+        authorization_headers.append(bytes(header))
+        print ("Authorization: %s" % header, file=sys.stderr)
+
+    dest = lookup(destination, path)
+    print ("Requesting %s" % dest, file=sys.stderr)
 
     result = requests.get(
-        lookup(destination, path),
+        dest,
         headers={"Authorization": authorization_headers[0]},
        verify=False,
     )
+    sys.stderr.write("Status Code: %d\n" % (result.status_code,))
     return result.json()
 
 
 def main():
-    origin_name, keyfile, destination, path = sys.argv[1:]
+    parser = argparse.ArgumentParser(
+        description=
+            "Signs and sends a federation request to a matrix homeserver",
+    )
 
-    with open(keyfile) as f:
+    parser.add_argument(
+        "-N", "--server-name",
+        help="Name to give as the local homeserver. If unspecified, will be "
+             "read from the config file.",
+    )
+
+    parser.add_argument(
+        "-k", "--signing-key-path",
+        help="Path to the file containing the private ed25519 key to sign the "
+             "request with.",
+    )
+
+    parser.add_argument(
+        "-c", "--config",
+        default="homeserver.yaml",
+        help="Path to server config file. Ignored if --server-name and "
+             "--signing-key-path are both given.",
+    )
+
+    parser.add_argument(
+        "-d", "--destination",
+        default="matrix.org",
+        help="name of the remote homeserver. We will do SRV lookups and "
+             "connect appropriately.",
+    )
+
+    parser.add_argument(
+        "path",
+        help="request path. We will add '/_matrix/federation/v1/' to this."
+    )
+
+    args = parser.parse_args()
+
+    if not args.server_name or not args.signing_key_path:
+        read_args_from_config(args)
+
+    with open(args.signing_key_path) as f:
         key = read_signing_keys(f)[0]
 
     result = get_json(
-        origin_name, key, destination, "/_matrix/federation/v1/" + path
+        args.server_name, key, args.destination, "/_matrix/federation/v1/" + args.path
     )
 
     json.dump(result, sys.stdout)
+    print ("")
 
 
+def read_args_from_config(args):
+    with open(args.config, 'r') as fh:
+        config = yaml.safe_load(fh)
+        if not args.server_name:
+            args.server_name = config['server_name']
+        if not args.signing_key_path:
+            args.signing_key_path = config['signing_key_path']
+
+
 if __name__ == "__main__":
     main()
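The header format used by the script above is easy to reproduce in isolation. The following is an illustrative sketch (not the script itself) that shows how the X-Matrix Authorization header is assembled from an origin name, key id and an already-computed signature, and attached to a federation GET; the signature value is a placeholder standing in for the ed25519 signature the script computes::

    # Illustrative sketch: build the X-Matrix Authorization header in the
    # same format as federation_client.py and issue the GET. The signature
    # here is a placeholder; in practice it comes from signing the request
    # JSON with the homeserver's ed25519 key.
    import requests

    def build_auth_header(origin_name, key_id, sig_b64):
        # Same format string as the script uses.
        return "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key_id, sig_b64)

    def federation_get(base_url, origin_name, key_id, sig_b64, path):
        headers = {"Authorization": build_auth_header(origin_name, key_id, sig_b64)}
        return requests.get(
            base_url + "/_matrix/federation/v1/" + path,
            headers=headers,
            verify=False,  # the script also skips TLS verification
        )

    # Example with placeholder values:
    # federation_get("https://matrix.org:8448", "example.com", "ed25519:a_AbCd", "<sig>", "version")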
@@ -9,16 +9,39 @@
 ROOMID="$1"
 
 sqlite3 homeserver.db <<EOF
-DELETE FROM context_depth WHERE context = '$ROOMID';
-DELETE FROM current_state WHERE context = '$ROOMID';
-DELETE FROM feedback WHERE room_id = '$ROOMID';
-DELETE FROM messages WHERE room_id = '$ROOMID';
-DELETE FROM pdu_backward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdu_edges WHERE context = '$ROOMID';
-DELETE FROM pdu_forward_extremities WHERE context = '$ROOMID';
-DELETE FROM pdus WHERE context = '$ROOMID';
-DELETE FROM room_data WHERE room_id = '$ROOMID';
+DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM event_edges WHERE room_id = '$ROOMID';
+DELETE FROM room_depth WHERE room_id = '$ROOMID';
+DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
+DELETE FROM events WHERE room_id = '$ROOMID';
+DELETE FROM event_json WHERE room_id = '$ROOMID';
+DELETE FROM state_events WHERE room_id = '$ROOMID';
+DELETE FROM current_state_events WHERE room_id = '$ROOMID';
 DELETE FROM room_memberships WHERE room_id = '$ROOMID';
+DELETE FROM feedback WHERE room_id = '$ROOMID';
+DELETE FROM topics WHERE room_id = '$ROOMID';
+DELETE FROM room_names WHERE room_id = '$ROOMID';
 DELETE FROM rooms WHERE room_id = '$ROOMID';
-DELETE FROM state_pdus WHERE context = '$ROOMID';
+DELETE FROM room_hosts WHERE room_id = '$ROOMID';
+DELETE FROM room_aliases WHERE room_id = '$ROOMID';
+DELETE FROM state_groups WHERE room_id = '$ROOMID';
+DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
+DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
+DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
+DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
+DELETE FROM guest_access WHERE room_id = '$ROOMID';
+DELETE FROM history_visibility WHERE room_id = '$ROOMID';
+DELETE FROM room_tags WHERE room_id = '$ROOMID';
+DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
+DELETE FROM room_account_data WHERE room_id = '$ROOMID';
+DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
+DELETE FROM local_invites WHERE room_id = '$ROOMID';
+DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
+DELETE FROM event_reports WHERE room_id = '$ROOMID';
+DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
+DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
+DELETE FROM event_auth WHERE room_id = '$ROOMID';
+DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
+VACUUM;
 EOF
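If you would rather drive the same deletions from Python than a shell heredoc, a sketch along the following lines uses the standard library's sqlite3 module with a bound parameter for the room ID. The table list mirrors the script above (``event_search_content`` is omitted because it keys on a different column); this is illustrative only, not an official tool::

    # Sketch: delete one room's rows from a SQLite homeserver.db, binding the
    # room ID as a parameter instead of interpolating it into the SQL.
    import sqlite3
    import sys

    TABLES = [
        "event_forward_extremities", "event_backward_extremities", "event_edges",
        "room_depth", "state_forward_extremities", "events", "event_json",
        "state_events", "current_state_events", "room_memberships", "feedback",
        "topics", "room_names", "rooms", "room_hosts", "room_aliases",
        "state_groups", "state_groups_state", "receipts_graph",
        "receipts_linearized", "guest_access", "history_visibility", "room_tags",
        "room_tags_revisions", "room_account_data", "event_push_actions",
        "local_invites", "pusher_throttle", "event_reports",
        "public_room_list_stream", "stream_ordering_to_exterm", "event_auth",
        "appservice_room_list",
    ]

    def purge_room(db_path, room_id):
        conn = sqlite3.connect(db_path)
        with conn:  # commits the deletes as one transaction
            for table in TABLES:
                conn.execute("DELETE FROM %s WHERE room_id = ?" % table, (room_id,))
        conn.execute("VACUUM")  # VACUUM must run outside the transaction
        conn.close()

    if __name__ == "__main__":
        purge_room("homeserver.db", sys.argv[1])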
@@ -34,11 +34,14 @@ logger = logging.getLogger("synapse_port_db")
 
 
 BOOLEAN_COLUMNS = {
-    "events": ["processed", "outlier"],
+    "events": ["processed", "outlier", "contains_url"],
     "rooms": ["is_public"],
     "event_edges": ["is_state"],
     "presence_list": ["accepted"],
     "presence_stream": ["currently_active"],
+    "public_room_list_stream": ["visibility"],
+    "device_lists_outbound_pokes": ["sent"],
+    "users_who_share_rooms": ["share_private"],
 }
 
 
@@ -71,6 +74,14 @@ APPEND_ONLY_TABLES = [
     "event_to_state_groups",
     "rejections",
     "event_search",
+    "presence_stream",
+    "push_rules_stream",
+    "current_state_resets",
+    "ex_outlier_stream",
+    "cache_invalidation_stream",
+    "public_room_list_stream",
+    "state_group_edges",
+    "stream_ordering_to_exterm",
 ]
 
 
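BOOLEAN_COLUMNS drives the SQLite-to-PostgreSQL type fix-up: SQLite hands these flags back as 0/1 integers while the PostgreSQL schema declares them as booleans. A simplified sketch of that conversion for a single row is shown below; it is illustrative only, the real logic lives in the port script itself::

    # Simplified sketch of the boolean fix-up the port script performs: SQLite
    # returns these columns as 0/1 integers, PostgreSQL expects real booleans.
    BOOLEAN_COLUMNS = {
        "events": ["processed", "outlier", "contains_url"],
        "rooms": ["is_public"],
    }

    def convert_row(table, headers, row):
        """Return the row with listed integer columns converted to bool."""
        bool_cols = set(BOOLEAN_COLUMNS.get(table, []))
        return [
            bool(value) if header in bool_cols and value is not None else value
            for header, value in zip(headers, row)
        ]

    print(convert_row("rooms", ["room_id", "is_public"], ["!abc:example.com", 1]))
    # -> ['!abc:example.com', True]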
@@ -92,8 +103,12 @@ class Store(object):
 
     _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
     _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
+    _simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
+    _simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
     _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
-    _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
+    _simple_select_one_onecol_txn = SQLBaseStore.__dict__[
+        "_simple_select_one_onecol_txn"
+    ]
 
     _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
     _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
@@ -107,7 +122,7 @@ class Store(object):
         try:
             txn = conn.cursor()
             return func(
-                LoggingTransaction(txn, desc, self.database_engine, []),
+                LoggingTransaction(txn, desc, self.database_engine, [], []),
                 *args, **kwargs
             )
         except self.database_engine.module.DatabaseError as e:
@@ -158,31 +173,40 @@ class Porter(object):
     def setup_table(self, table):
         if table in APPEND_ONLY_TABLES:
             # It's safe to just carry on inserting.
-            next_chunk = yield self.postgres_store._simple_select_one_onecol(
+            row = yield self.postgres_store._simple_select_one(
                 table="port_from_sqlite3",
                 keyvalues={"table_name": table},
-                retcol="rowid",
+                retcols=("forward_rowid", "backward_rowid"),
                 allow_none=True,
             )
 
             total_to_port = None
-            if next_chunk is None:
+            if row is None:
                 if table == "sent_transactions":
-                    next_chunk, already_ported, total_to_port = (
+                    forward_chunk, already_ported, total_to_port = (
                         yield self._setup_sent_transactions()
                     )
+                    backward_chunk = 0
                 else:
                     yield self.postgres_store._simple_insert(
                         table="port_from_sqlite3",
-                        values={"table_name": table, "rowid": 1}
+                        values={
+                            "table_name": table,
+                            "forward_rowid": 1,
+                            "backward_rowid": 0,
+                        }
                     )
 
-                    next_chunk = 1
+                    forward_chunk = 1
+                    backward_chunk = 0
                     already_ported = 0
+            else:
+                forward_chunk = row["forward_rowid"]
+                backward_chunk = row["backward_rowid"]
 
             if total_to_port is None:
                 already_ported, total_to_port = yield self._get_total_count_to_port(
-                    table, next_chunk
+                    table, forward_chunk, backward_chunk
                 )
         else:
             def delete_all(txn):
@@ -196,46 +220,104 @@ class Porter(object):
 
             yield self.postgres_store._simple_insert(
                 table="port_from_sqlite3",
-                values={"table_name": table, "rowid": 0}
+                values={
+                    "table_name": table,
+                    "forward_rowid": 1,
+                    "backward_rowid": 0,
+                }
             )
 
-            next_chunk = 1
+            forward_chunk = 1
+            backward_chunk = 0
 
             already_ported, total_to_port = yield self._get_total_count_to_port(
-                table, next_chunk
+                table, forward_chunk, backward_chunk
             )
 
-        defer.returnValue((table, already_ported, total_to_port, next_chunk))
+        defer.returnValue(
+            (table, already_ported, total_to_port, forward_chunk, backward_chunk)
+        )
 
     @defer.inlineCallbacks
-    def handle_table(self, table, postgres_size, table_size, next_chunk):
+    def handle_table(self, table, postgres_size, table_size, forward_chunk,
+                     backward_chunk):
         if not table_size:
             return
 
         self.progress.add_table(table, postgres_size, table_size)
 
         if table == "event_search":
-            yield self.handle_search_table(postgres_size, table_size, next_chunk)
+            yield self.handle_search_table(
+                postgres_size, table_size, forward_chunk, backward_chunk
+            )
             return
 
-        select = (
+        if table in (
+            "user_directory", "user_directory_search", "users_who_share_rooms",
+            "users_in_pubic_room",
+        ):
+            # We don't port these tables, as they're a faff and we can regenreate
+            # them anyway.
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
+        if table == "user_directory_stream_pos":
+            # We need to make sure there is a single row, `(X, null), as that is
+            # what synapse expects to be there.
+            yield self.postgres_store._simple_insert(
+                table=table,
+                values={"stream_id": None},
+            )
+            self.progress.update(table, table_size)  # Mark table as done
+            return
+
+        forward_select = (
             "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
             % (table,)
         )
 
+        backward_select = (
+            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
+            % (table,)
+        )
+
+        do_forward = [True]
+        do_backward = [True]
+
         while True:
             def r(txn):
-                txn.execute(select, (next_chunk, self.batch_size,))
-                rows = txn.fetchall()
-                headers = [column[0] for column in txn.description]
+                forward_rows = []
+                backward_rows = []
+                if do_forward[0]:
+                    txn.execute(forward_select, (forward_chunk, self.batch_size,))
+                    forward_rows = txn.fetchall()
+                    if not forward_rows:
+                        do_forward[0] = False
+
+                if do_backward[0]:
+                    txn.execute(backward_select, (backward_chunk, self.batch_size,))
+                    backward_rows = txn.fetchall()
+                    if not backward_rows:
+                        do_backward[0] = False
+
+                if forward_rows or backward_rows:
+                    headers = [column[0] for column in txn.description]
+                else:
+                    headers = None
 
-                return headers, rows
+                return headers, forward_rows, backward_rows
 
-            headers, rows = yield self.sqlite_store.runInteraction("select", r)
+            headers, frows, brows = yield self.sqlite_store.runInteraction(
+                "select", r
+            )
 
-            if rows:
-                next_chunk = rows[-1][0] + 1
+            if frows or brows:
+                if frows:
+                    forward_chunk = max(row[0] for row in frows) + 1
+                if brows:
+                    backward_chunk = min(row[0] for row in brows) - 1
 
+                rows = frows + brows
                 self._convert_rows(table, headers, rows)
 
                 def insert(txn):
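The forward/backward split above exists so that a port can be resumed, and so that a rerun against a database originally ported by the old single-rowid script can also sweep backwards and pick up rows it never copied. A stripped-down sketch of the same walk over a plain sqlite3 connection (illustrative only, with a made-up table and batch size) is::

    # Minimal sketch of the two-direction chunk walk: scan forwards from
    # forward_chunk and backwards from backward_chunk until both are exhausted.
    import sqlite3

    def walk_rows(conn, table, forward_chunk=1, backward_chunk=0, batch_size=3):
        do_forward, do_backward = True, True
        while do_forward or do_backward:
            batch = []
            if do_forward:
                rows = conn.execute(
                    "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
                    % table, (forward_chunk, batch_size)).fetchall()
                if rows:
                    forward_chunk = max(r[0] for r in rows) + 1
                    batch.extend(rows)
                else:
                    do_forward = False
            if do_backward:
                rows = conn.execute(
                    "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
                    % table, (backward_chunk, batch_size)).fetchall()
                if rows:
                    backward_chunk = min(r[0] for r in rows) - 1
                    batch.extend(rows)
                else:
                    do_backward = False
            if batch:
                yield batch

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE demo (val TEXT)")
    conn.executemany("INSERT INTO demo (val) VALUES (?)", [("row%d" % i,) for i in range(7)])
    for chunk in walk_rows(conn, "demo", forward_chunk=4, backward_chunk=3):
        print(chunk)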
@@ -247,7 +329,10 @@ class Porter(object):
                     txn,
                     table="port_from_sqlite3",
                     keyvalues={"table_name": table},
-                    updatevalues={"rowid": next_chunk},
+                    updatevalues={
+                        "forward_rowid": forward_chunk,
+                        "backward_rowid": backward_chunk,
+                    },
                 )
 
                 yield self.postgres_store.execute(insert)
@@ -259,7 +344,8 @@ class Porter(object):
                 return
 
     @defer.inlineCallbacks
-    def handle_search_table(self, postgres_size, table_size, next_chunk):
+    def handle_search_table(self, postgres_size, table_size, forward_chunk,
+                            backward_chunk):
         select = (
             "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
             " FROM event_search as es"
@@ -270,7 +356,7 @@ class Porter(object):
 
         while True:
             def r(txn):
-                txn.execute(select, (next_chunk, self.batch_size,))
+                txn.execute(select, (forward_chunk, self.batch_size,))
                 rows = txn.fetchall()
                 headers = [column[0] for column in txn.description]
 
@@ -279,7 +365,7 @@ class Porter(object):
             headers, rows = yield self.sqlite_store.runInteraction("select", r)
 
             if rows:
-                next_chunk = rows[-1][0] + 1
+                forward_chunk = rows[-1][0] + 1
 
                 # We have to treat event_search differently since it has a
                 # different structure in the two different databases.
@@ -290,10 +376,13 @@ class Porter(object):
                     " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
                 )
 
-                rows_dict = [
-                    dict(zip(headers, row))
-                    for row in rows
-                ]
+                rows_dict = []
+                for row in rows:
+                    d = dict(zip(headers, row))
+                    if "\0" in d['value']:
+                        logger.warn('dropping search row %s', d)
+                    else:
+                        rows_dict.append(d)
 
                 txn.executemany(sql, [
                     (
@@ -312,7 +401,10 @@ class Porter(object):
                     txn,
                     table="port_from_sqlite3",
                     keyvalues={"table_name": "event_search"},
-                    updatevalues={"rowid": next_chunk},
+                    updatevalues={
+                        "forward_rowid": forward_chunk,
+                        "backward_rowid": backward_chunk,
+                    },
                 )
 
                 yield self.postgres_store.execute(insert)
@@ -324,7 +416,6 @@ class Porter(object):
|
|||||||
else:
|
else:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
def setup_db(self, db_config, database_engine):
|
def setup_db(self, db_config, database_engine):
|
||||||
db_conn = database_engine.module.connect(
|
db_conn = database_engine.module.connect(
|
||||||
**{
|
**{
|
||||||
@@ -379,9 +470,7 @@ class Porter(object):
|
|||||||
|
|
||||||
postgres_tables = yield self.postgres_store._simple_select_onecol(
|
postgres_tables = yield self.postgres_store._simple_select_onecol(
|
||||||
table="information_schema.tables",
|
table="information_schema.tables",
|
||||||
keyvalues={
|
keyvalues={},
|
||||||
"table_schema": "public",
|
|
||||||
},
|
|
||||||
retcol="distinct table_name",
|
retcol="distinct table_name",
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -395,10 +484,32 @@ class Porter(object):
|
|||||||
txn.execute(
|
txn.execute(
|
||||||
"CREATE TABLE port_from_sqlite3 ("
|
"CREATE TABLE port_from_sqlite3 ("
|
||||||
" table_name varchar(100) NOT NULL UNIQUE,"
|
" table_name varchar(100) NOT NULL UNIQUE,"
|
||||||
" rowid bigint NOT NULL"
|
" forward_rowid bigint NOT NULL,"
|
||||||
|
" backward_rowid bigint NOT NULL"
|
||||||
")"
|
")"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# The old port script created a table with just a "rowid" column.
|
||||||
|
# We want people to be able to rerun this script from an old port
|
||||||
|
# so that they can pick up any missing events that were not
|
||||||
|
# ported across.
|
||||||
|
def alter_table(txn):
|
||||||
|
txn.execute(
|
||||||
|
"ALTER TABLE IF EXISTS port_from_sqlite3"
|
||||||
|
" RENAME rowid TO forward_rowid"
|
||||||
|
)
|
||||||
|
txn.execute(
|
||||||
|
"ALTER TABLE IF EXISTS port_from_sqlite3"
|
||||||
|
" ADD backward_rowid bigint NOT NULL DEFAULT 0"
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
yield self.postgres_store.runInteraction(
|
||||||
|
"alter_table", alter_table
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.info("Failed to create port table: %s", e)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
yield self.postgres_store.runInteraction(
|
yield self.postgres_store.runInteraction(
|
||||||
"create_port_table", create_port_table
|
"create_port_table", create_port_table
|
||||||
@@ -514,7 +625,11 @@ class Porter(object):
|
|||||||
|
|
||||||
yield self.postgres_store._simple_insert(
|
yield self.postgres_store._simple_insert(
|
||||||
table="port_from_sqlite3",
|
table="port_from_sqlite3",
|
||||||
values={"table_name": "sent_transactions", "rowid": next_chunk}
|
values={
|
||||||
|
"table_name": "sent_transactions",
|
||||||
|
"forward_rowid": next_chunk,
|
||||||
|
"backward_rowid": 0,
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_sent_table_size(txn):
|
def get_sent_table_size(txn):
|
||||||
@@ -535,13 +650,18 @@ class Porter(object):
|
|||||||
defer.returnValue((next_chunk, inserted_rows, total_count))
|
defer.returnValue((next_chunk, inserted_rows, total_count))
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_remaining_count_to_port(self, table, next_chunk):
|
def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
|
||||||
rows = yield self.sqlite_store.execute_sql(
|
frows = yield self.sqlite_store.execute_sql(
|
||||||
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
|
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
|
||||||
next_chunk,
|
forward_chunk,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(rows[0][0])
|
brows = yield self.sqlite_store.execute_sql(
|
||||||
|
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
|
||||||
|
backward_chunk,
|
||||||
|
)
|
||||||
|
|
||||||
|
defer.returnValue(frows[0][0] + brows[0][0])
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_already_ported_count(self, table):
|
def _get_already_ported_count(self, table):
|
||||||
@@ -552,10 +672,10 @@ class Porter(object):
|
|||||||
defer.returnValue(rows[0][0])
|
defer.returnValue(rows[0][0])
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_total_count_to_port(self, table, next_chunk):
|
def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
|
||||||
remaining, done = yield defer.gatherResults(
|
remaining, done = yield defer.gatherResults(
|
||||||
[
|
[
|
||||||
self._get_remaining_count_to_port(table, next_chunk),
|
self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
|
||||||
self._get_already_ported_count(table),
|
self._get_already_ported_count(table),
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
|
|||||||
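The diff above replaces the single per-table "rowid" checkpoint with a forward_rowid/backward_rowid pair, so a rerun of the port script can sweep both above and below a previous checkpoint. A minimal sketch of that idea, with hypothetical helper names (fetch_rows is not part of the script), under the assumption that rows are keyed by an integer rowid:

    # Hypothetical sketch of walking a table from both ends of a checkpoint.
    def port_table_chunks(fetch_rows, batch_size, forward_chunk, backward_chunk):
        while True:
            # rows at or above the forward checkpoint, and at or below the backward one
            frows = fetch_rows("rowid >= ?", forward_chunk, batch_size)
            brows = fetch_rows("rowid <= ?", backward_chunk, batch_size)
            if not frows and not brows:
                return
            if frows:
                forward_chunk = max(row[0] for row in frows) + 1
            if brows:
                backward_chunk = min(row[0] for row in brows) - 1
            yield frows + brows  # caller converts and inserts these into postgres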
@@ -16,7 +16,5 @@ ignore =

 [flake8]
 max-line-length = 90
-ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
-
-[pep8]
-max-line-length = 90
+# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
+ignore = W503
setup.py (73 changed lines)
@@ -23,6 +23,45 @@ import sys

 here = os.path.abspath(os.path.dirname(__file__))


+# Some notes on `setup.py test`:
+#
+# Once upon a time we used to try to make `setup.py test` run `tox` to run the
+# tests. That's a bad idea for three reasons:
+#
+# 1: `setup.py test` is supposed to find out whether the tests work in the
+#    *current* environment, not whatever tox sets up.
+# 2: Empirically, trying to install tox during the test run wasn't working ("No
+#    module named virtualenv").
+# 3: The tox documentation advises against it[1].
+#
+# Even further back in time, we used to use setuptools_trial [2]. That has its
+# own set of issues: for instance, it requires installation of Twisted to build
+# an sdist (because the recommended mode of usage is to add it to
+# `setup_requires`). That in turn means that in order to successfully run tox
+# you have to have the python header files installed for whichever version of
+# python tox uses (which is python3 on recent ubuntus, for example).
+#
+# So, for now at least, we stick with what appears to be the convention among
+# Twisted projects, and don't attempt to do anything when someone runs
+# `setup.py test`; instead we direct people to run `trial` directly if they
+# care.
+#
+# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
+# [2]: https://pypi.python.org/pypi/setuptools_trial
+class TestCommand(Command):
+    user_options = []
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        print ("""Synapse's tests cannot be run via setup.py. To run them, try:
+    PYTHONPATH="." trial tests
+""")
+
 def read_file(path_segments):
     """Read a file from the package. Takes a list of strings to join to
     make the path"""
@@ -39,38 +78,6 @@ def exec_file(path_segments):
     return result


-class Tox(Command):
-    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
-
-    def initialize_options(self):
-        self.tox_args = None
-
-    def finalize_options(self):
-        self.test_args = []
-        self.test_suite = True
-
-    def run(self):
-        #import here, cause outside the eggs aren't loaded
-        try:
-            import tox
-        except ImportError:
-            try:
-                self.distribution.fetch_build_eggs("tox")
-                import tox
-            except:
-                raise RuntimeError(
-                    "The tests need 'tox' to run. Please install 'tox'."
-                )
-        import shlex
-        args = self.tox_args
-        if args:
-            args = shlex.split(self.tox_args)
-        else:
-            args = []
-        errno = tox.cmdline(args=args)
-        sys.exit(errno)
-
-
 version = exec_file(("synapse", "__init__.py"))["__version__"]
 dependencies = exec_file(("synapse", "python_dependencies.py"))
 long_description = read_file(("README.rst",))
@@ -86,5 +93,5 @@ setup(
     zip_safe=False,
     long_description=long_description,
     scripts=["synctl"] + glob.glob("scripts/*"),
-    cmdclass={'test': Tox},
+    cmdclass={'test': TestCommand},
 )
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.16.1-r1"
+__version__ = "0.24.1"

(Next file's diff suppressed because it is too large.)
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -43,10 +44,8 @@ class JoinRules(object):

 class LoginType(object):
     PASSWORD = u"m.login.password"
-    OAUTH = u"m.login.oauth2"
-    EMAIL_CODE = u"m.login.email.code"
-    EMAIL_URL = u"m.login.email.url"
     EMAIL_IDENTITY = u"m.login.email.identity"
+    MSISDN = u"m.login.msisdn"
     RECAPTCHA = u"m.login.recaptcha"
     DUMMY = u"m.login.dummy"

@@ -85,3 +84,8 @@ class RoomCreationPreset(object):
     PRIVATE_CHAT = "private_chat"
     PUBLIC_CHAT = "public_chat"
     TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
+
+
+class ThirdPartyEntityKind(object):
+    USER = "user"
+    LOCATION = "location"
@@ -15,6 +15,7 @@

 """Contains exceptions and error codes."""

+import json
 import logging

 logger = logging.getLogger(__name__)
@@ -39,36 +40,57 @@ class Codes(object):
     CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
     CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
     MISSING_PARAM = "M_MISSING_PARAM"
+    INVALID_PARAM = "M_INVALID_PARAM"
     TOO_LARGE = "M_TOO_LARGE"
     EXCLUSIVE = "M_EXCLUSIVE"
     THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
     THREEPID_IN_USE = "M_THREEPID_IN_USE"
+    THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
     INVALID_USERNAME = "M_INVALID_USERNAME"
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"


 class CodeMessageException(RuntimeError):
-    """An exception with integer code and message string attributes."""
+    """An exception with integer code and message string attributes.
+
+    Attributes:
+        code (int): HTTP error code
+        msg (str): string describing the error
+    """
     def __init__(self, code, msg):
         super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
         self.code = code
         self.msg = msg
-        self.response_code_message = None

     def error_dict(self):
         return cs_error(self.msg)


+class MatrixCodeMessageException(CodeMessageException):
+    """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
+
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
+    def __init__(self, code, msg, errcode=Codes.UNKNOWN):
+        super(MatrixCodeMessageException, self).__init__(code, msg)
+        self.errcode = errcode
+
+
 class SynapseError(CodeMessageException):
-    """A base error which can be caught for all synapse events."""
+    """A base exception type for matrix errors which have an errcode and error
+    message (as well as an HTTP status code).
+
+    Attributes:
+        errcode (str): Matrix error code e.g 'M_FORBIDDEN'
+    """
     def __init__(self, code, msg, errcode=Codes.UNKNOWN):
         """Constructs a synapse error.

         Args:
             code (int): The integer error code (an HTTP response code)
             msg (str): The human-readable error message.
-            err (str): The error code e.g 'M_FORBIDDEN'
+            errcode (str): The matrix error code e.g 'M_FORBIDDEN'
         """
         super(SynapseError, self).__init__(code, msg)
         self.errcode = errcode
@@ -79,6 +101,39 @@ class SynapseError(CodeMessageException):
             self.errcode,
         )

+    @classmethod
+    def from_http_response_exception(cls, err):
+        """Make a SynapseError based on an HTTPResponseException
+
+        This is useful when a proxied request has failed, and we need to
+        decide how to map the failure onto a matrix error to send back to the
+        client.
+
+        An attempt is made to parse the body of the http response as a matrix
+        error. If that succeeds, the errcode and error message from the body
+        are used as the errcode and error message in the new synapse error.
+
+        Otherwise, the errcode is set to M_UNKNOWN, and the error message is
+        set to the reason code from the HTTP response.
+
+        Args:
+            err (HttpResponseException):
+
+        Returns:
+            SynapseError:
+        """
+        # try to parse the body as json, to get better errcode/msg, but
+        # default to M_UNKNOWN with the HTTP status as the error text
+        try:
+            j = json.loads(err.response)
+        except ValueError:
+            j = {}
+        errcode = j.get('errcode', Codes.UNKNOWN)
+        errmsg = j.get('error', err.msg)
+
+        res = SynapseError(err.code, errmsg, errcode)
+        return res
+
+
 class RegistrationError(SynapseError):
     """An error raised when a registration event fails."""
@@ -104,13 +159,11 @@ class UnrecognizedRequestError(SynapseError):

 class NotFoundError(SynapseError):
     """An error indicating we can't find the thing you asked for"""
-    def __init__(self, *args, **kwargs):
-        if "errcode" not in kwargs:
-            kwargs["errcode"] = Codes.NOT_FOUND
+    def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND):
         super(NotFoundError, self).__init__(
             404,
-            "Not found",
-            **kwargs
+            msg,
+            errcode=errcode
         )


@@ -171,7 +224,6 @@ class LimitExceededError(SynapseError):
                  errcode=Codes.LIMIT_EXCEEDED):
         super(LimitExceededError, self).__init__(code, msg, errcode)
         self.retry_after_ms = retry_after_ms
-        self.response_code_message = "Too Many Requests"

     def error_dict(self):
         return cs_error(
@@ -241,6 +293,19 @@ class FederationError(RuntimeError):


 class HttpResponseException(CodeMessageException):
+    """
+    Represents an HTTP-level failure of an outbound request
+
+    Attributes:
+        response (str): body of response
+    """
     def __init__(self, code, msg, response):
-        self.response = response
+        """
+
+        Args:
+            code (int): HTTP status code
+            msg (str): reason phrase from HTTP response status line
+            response (str): body of response
+        """
         super(HttpResponseException, self).__init__(code, msg)
+        self.response = response
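The new SynapseError.from_http_response_exception classmethod is meant for translating a failed outbound/proxied request into a matrix error. A small usage sketch (the make_request callable is a placeholder, not a Synapse API):

    # Hedged sketch: convert an outbound HTTP failure into a client-facing error.
    from synapse.api.errors import HttpResponseException, SynapseError

    def proxy_result_or_synapse_error(make_request):
        try:
            return make_request()  # placeholder for whatever issues the request
        except HttpResponseException as e:
            # Re-raise with the errcode/error parsed from the response body,
            # falling back to M_UNKNOWN as described in the docstring above.
            raise SynapseError.from_http_response_exception(e)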
@@ -13,11 +13,174 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.api.errors import SynapseError
+from synapse.storage.presence import UserPresenceState
 from synapse.types import UserID, RoomID

 from twisted.internet import defer

 import ujson as json
+import jsonschema
+from jsonschema import FormatChecker
+
+FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "limit": {
+            "type": "number"
+        },
+        "senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "not_senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        # TODO: We don't limit event type values but we probably should...
+        # check types are valid event types
+        "types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "not_types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        }
+    }
+}
+
+ROOM_FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "not_rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "ephemeral": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "include_leave": {
+            "type": "boolean"
+        },
+        "state": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "timeline": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+        "account_data": {
+            "$ref": "#/definitions/room_event_filter"
+        },
+    }
+}
+
+ROOM_EVENT_FILTER_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {
+        "limit": {
+            "type": "number"
+        },
+        "senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "not_senders": {
+            "$ref": "#/definitions/user_id_array"
+        },
+        "types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "not_types": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            }
+        },
+        "rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "not_rooms": {
+            "$ref": "#/definitions/room_id_array"
+        },
+        "contains_url": {
+            "type": "boolean"
+        }
+    }
+}
+
+USER_ID_ARRAY_SCHEMA = {
+    "type": "array",
+    "items": {
+        "type": "string",
+        "format": "matrix_user_id"
+    }
+}
+
+ROOM_ID_ARRAY_SCHEMA = {
+    "type": "array",
+    "items": {
+        "type": "string",
+        "format": "matrix_room_id"
+    }
+}
+
+USER_FILTER_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "description": "schema for a Sync filter",
+    "type": "object",
+    "definitions": {
+        "room_id_array": ROOM_ID_ARRAY_SCHEMA,
+        "user_id_array": USER_ID_ARRAY_SCHEMA,
+        "filter": FILTER_SCHEMA,
+        "room_filter": ROOM_FILTER_SCHEMA,
+        "room_event_filter": ROOM_EVENT_FILTER_SCHEMA
+    },
+    "properties": {
+        "presence": {
+            "$ref": "#/definitions/filter"
+        },
+        "account_data": {
+            "$ref": "#/definitions/filter"
+        },
+        "room": {
+            "$ref": "#/definitions/room_filter"
+        },
+        "event_format": {
+            "type": "string",
+            "enum": ["client", "federation"]
+        },
+        "event_fields": {
+            "type": "array",
+            "items": {
+                "type": "string",
+                # Don't allow '\\' in event field filters. This makes matching
+                # events a lot easier as we can then use a negative lookbehind
+                # assertion to split '\.' If we allowed \\ then it would
+                # incorrectly split '\\.' See synapse.events.utils.serialize_event
+                "pattern": "^((?!\\\).)*$"
+            }
+        }
+    },
+    "additionalProperties": False
+}
+
+
+@FormatChecker.cls_checks('matrix_room_id')
+def matrix_room_id_validator(room_id_str):
+    return RoomID.from_string(room_id_str)
+
+
+@FormatChecker.cls_checks('matrix_user_id')
+def matrix_user_id_validator(user_id_str):
+    return UserID.from_string(user_id_str)
+
+
 class Filtering(object):
@@ -52,83 +215,11 @@ class Filtering(object):
         # NB: Filters are the complete json blobs. "Definitions" are an
         # individual top-level key e.g. public_user_data. Filters are made of
         # many definitions.
-        top_level_definitions = [
-            "presence", "account_data"
-        ]
-
-        room_level_definitions = [
-            "state", "timeline", "ephemeral", "account_data"
-        ]
-
-        for key in top_level_definitions:
-            if key in user_filter_json:
-                self._check_definition(user_filter_json[key])
-
-        if "room" in user_filter_json:
-            self._check_definition_room_lists(user_filter_json["room"])
-            for key in room_level_definitions:
-                if key in user_filter_json["room"]:
-                    self._check_definition(user_filter_json["room"][key])
-
-    def _check_definition_room_lists(self, definition):
-        """Check that "rooms" and "not_rooms" are lists of room ids if they
-        are present
-
-        Args:
-            definition(dict): The filter definition
-        Raises:
-            SynapseError: If there was a problem with this definition.
-        """
-        # check rooms are valid room IDs
-        room_id_keys = ["rooms", "not_rooms"]
-        for key in room_id_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for room_id in definition[key]:
-                    RoomID.from_string(room_id)
-
-    def _check_definition(self, definition):
-        """Check if the provided definition is valid.
-
-        This inspects not only the types but also the values to make sure they
-        make sense.
-
-        Args:
-            definition(dict): The filter definition
-        Raises:
-            SynapseError: If there was a problem with this definition.
-        """
-        # NB: Filters are the complete json blobs. "Definitions" are an
-        # individual top-level key e.g. public_user_data. Filters are made of
-        # many definitions.
-        if type(definition) != dict:
-            raise SynapseError(
-                400, "Expected JSON object, not %s" % (definition,)
-            )
-
-        self._check_definition_room_lists(definition)
-
-        # check senders are valid user IDs
-        user_id_keys = ["senders", "not_senders"]
-        for key in user_id_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for user_id in definition[key]:
-                    UserID.from_string(user_id)
-
-        # TODO: We don't limit event type values but we probably should...
-        # check types are valid event types
-        event_keys = ["types", "not_types"]
-        for key in event_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for event_type in definition[key]:
-                    if not isinstance(event_type, basestring):
-                        raise SynapseError(400, "Event type should be a string")
+        try:
+            jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
+                                format_checker=FormatChecker())
+        except jsonschema.ValidationError as e:
+            raise SynapseError(400, e.message)
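The hand-rolled per-key checks above are replaced by a single jsonschema validation against USER_FILTER_SCHEMA. A small sketch of that same validation path used standalone, assuming jsonschema is installed and USER_FILTER_SCHEMA plus the matrix_* format checkers come from the module shown above:

    # Hedged sketch: validate a Sync filter JSON outside the Filtering class.
    import jsonschema
    from jsonschema import FormatChecker

    def is_valid_user_filter(user_filter_json):
        try:
            jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
                                format_checker=FormatChecker())
            return True
        except jsonschema.ValidationError:
            return False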
 class FilterCollection(object):
@@ -152,6 +243,7 @@ class FilterCollection(object):
         self.include_leave = filter_json.get("room", {}).get(
             "include_leave", False
         )
+        self.event_fields = filter_json.get("event_fields", [])

     def __repr__(self):
         return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
@@ -186,32 +278,89 @@ class FilterCollection(object):
     def filter_room_account_data(self, events):
         return self._room_account_data.filter(self._room_filter.filter(events))

+    def blocks_all_presence(self):
+        return (
+            self._presence_filter.filters_all_types() or
+            self._presence_filter.filters_all_senders()
+        )
+
+    def blocks_all_room_ephemeral(self):
+        return (
+            self._room_ephemeral_filter.filters_all_types() or
+            self._room_ephemeral_filter.filters_all_senders() or
+            self._room_ephemeral_filter.filters_all_rooms()
+        )
+
+    def blocks_all_room_timeline(self):
+        return (
+            self._room_timeline_filter.filters_all_types() or
+            self._room_timeline_filter.filters_all_senders() or
+            self._room_timeline_filter.filters_all_rooms()
+        )
+

 class Filter(object):
     def __init__(self, filter_json):
         self.filter_json = filter_json

+        self.types = self.filter_json.get("types", None)
+        self.not_types = self.filter_json.get("not_types", [])
+
+        self.rooms = self.filter_json.get("rooms", None)
+        self.not_rooms = self.filter_json.get("not_rooms", [])
+
+        self.senders = self.filter_json.get("senders", None)
+        self.not_senders = self.filter_json.get("not_senders", [])
+
+        self.contains_url = self.filter_json.get("contains_url", None)
+
+    def filters_all_types(self):
+        return "*" in self.not_types
+
+    def filters_all_senders(self):
+        return "*" in self.not_senders
+
+    def filters_all_rooms(self):
+        return "*" in self.not_rooms
+
     def check(self, event):
         """Checks whether the filter matches the given event.

         Returns:
             bool: True if the event matches
         """
-        sender = event.get("sender", None)
-        if not sender:
-            # Presence events have their 'sender' in content.user_id
-            content = event.get("content")
-            # account_data has been allowed to have non-dict content, so check type first
-            if isinstance(content, dict):
-                sender = content.get("user_id")
+        # We usually get the full "events" as dictionaries coming through,
+        # except for presence which actually gets passed around as its own
+        # namedtuple type.
+        if isinstance(event, UserPresenceState):
+            sender = event.user_id
+            room_id = None
+            ev_type = "m.presence"
+            is_url = False
+        else:
+            sender = event.get("sender", None)
+            if not sender:
+                # Presence events had their 'sender' in content.user_id, but are
+                # now handled above. We don't know if anything else uses this
+                # form. TODO: Check this and probably remove it.
+                content = event.get("content")
+                # account_data has been allowed to have non-dict content, so
+                # check type first
+                if isinstance(content, dict):
+                    sender = content.get("user_id")
+
+            room_id = event.get("room_id", None)
+            ev_type = event.get("type", None)
+            is_url = "url" in event.get("content", {})

         return self.check_fields(
-            event.get("room_id", None),
+            room_id,
             sender,
-            event.get("type", None),
+            ev_type,
+            is_url,
         )

-    def check_fields(self, room_id, sender, event_type):
+    def check_fields(self, room_id, sender, event_type, contains_url):
         """Checks whether the filter matches the given event fields.

         Returns:
@@ -225,15 +374,20 @@ class Filter(object):

         for name, match_func in literal_keys.items():
             not_name = "not_%s" % (name,)
-            disallowed_values = self.filter_json.get(not_name, [])
+            disallowed_values = getattr(self, not_name)
             if any(map(match_func, disallowed_values)):
                 return False

-            allowed_values = self.filter_json.get(name, None)
+            allowed_values = getattr(self, name)
             if allowed_values is not None:
                 if not any(map(match_func, allowed_values)):
                     return False

+        contains_url_filter = self.filter_json.get("contains_url")
+        if contains_url_filter is not None:
+            if contains_url_filter != contains_url:
+                return False
+
         return True

     def filter_rooms(self, room_ids):
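Filter.check now also honours a contains_url constraint. A hedged example of that behaviour, with a made-up event dict purely for illustration:

    # Only match m.room.message events whose content carries a "url" key.
    f = Filter({"types": ["m.room.message"], "contains_url": True})
    event = {
        "sender": "@alice:example.com",
        "type": "m.room.message",
        "room_id": "!room:example.com",
        "content": {"msgtype": "m.image", "url": "mxc://example.com/abc"},
    }
    print(f.check(event))  # True: the type matches and the content contains a url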
@@ -23,7 +23,7 @@ class Ratelimiter(object):
     def __init__(self):
         self.message_counts = collections.OrderedDict()

-    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count):
+    def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
         """Can the user send a message?
         Args:
             user_id: The user sending a message.
@@ -32,12 +32,15 @@ class Ratelimiter(object):
                 second.
             burst_count: How many messages the user can send before being
                 limited.
+            update (bool): Whether to update the message rates or not. This is
+                useful to check if a message would be allowed to be sent before
+                its ready to be actually sent.
         Returns:
             A pair of a bool indicating if they can send a message now and a
                 time in seconds of when they can next send a message.
         """
         self.prune_message_counts(time_now_s)
-        message_count, time_start, _ignored = self.message_counts.pop(
+        message_count, time_start, _ignored = self.message_counts.get(
             user_id, (0., time_now_s, None),
         )
         time_delta = time_now_s - time_start
@@ -52,6 +55,7 @@ class Ratelimiter(object):
             allowed = True
             message_count += 1

+        if update:
             self.message_counts[user_id] = (
                 message_count, time_start, msg_rate_hz
             )
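The new update flag lets callers ask whether a message would be allowed without actually recording it. A hedged sketch of that pattern (the user id, timestamps and rates are placeholders):

    # Peek at the limit first, then commit the send once it really happens.
    limiter = Ratelimiter()
    allowed, retry_at = limiter.send_message(
        "@alice:example.com", time_now_s=1000, msg_rate_hz=0.2, burst_count=10,
        update=False,  # only check; do not count this attempt
    )
    if allowed:
        # ... actually send the message, then record it against the limit
        limiter.send_message(
            "@alice:example.com", time_now_s=1000, msg_rate_hz=0.2, burst_count=10,
        )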
@@ -25,4 +25,3 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
-APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
@@ -16,13 +16,11 @@
 import sys
 sys.dont_write_bytecode = True

-from synapse.python_dependencies import (
-    check_requirements, MissingRequirementError
-)  # NOQA
+from synapse import python_dependencies  # noqa: E402

 try:
-    check_requirements()
-except MissingRequirementError as e:
+    python_dependencies.check_requirements()
+except python_dependencies.MissingRequirementError as e:
     message = "\n".join([
         "Missing Requirement: %s" % (e.message,),
         "To install run:",
synapse/app/_base.py (new file, 122 lines)
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import logging
+import sys
+
+try:
+    import affinity
+except Exception:
+    affinity = None
+
+from daemonize import Daemonize
+from synapse.util import PreserveLoggingContext
+from synapse.util.rlimit import change_resource_limit
+from twisted.internet import reactor
+
+
+def start_worker_reactor(appname, config):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor. Pulls configuration from the 'worker' settings in 'config'.
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        config (synapse.config.Config): config object
+    """
+
+    logger = logging.getLogger(config.worker_app)
+
+    start_reactor(
+        appname,
+        config.soft_file_limit,
+        config.gc_thresholds,
+        config.worker_pid_file,
+        config.worker_daemonize,
+        config.worker_cpu_affinity,
+        logger,
+    )
+
+
+def start_reactor(
+        appname,
+        soft_file_limit,
+        gc_thresholds,
+        pid_file,
+        daemonize,
+        cpu_affinity,
+        logger,
+):
+    """ Run the reactor in the main process
+
+    Daemonizes if necessary, and then configures some resources, before starting
+    the reactor
+
+    Args:
+        appname (str): application name which will be sent to syslog
+        soft_file_limit (int):
+        gc_thresholds:
+        pid_file (str): name of pid file to write to if daemonize is True
+        daemonize (bool): true to run the reactor in a background process
+        cpu_affinity (int|None): cpu affinity mask
+        logger (logging.Logger): logger instance to pass to Daemonize
+    """
+
+    def run():
+        # make sure that we run the reactor with the sentinel log context,
+        # otherwise other PreserveLoggingContext instances will get confused
+        # and complain when they see the logcontext arbitrarily swapping
+        # between the sentinel and `run` logcontexts.
+        with PreserveLoggingContext():
+            logger.info("Running")
+            if cpu_affinity is not None:
+                if not affinity:
+                    quit_with_error(
+                        "Missing package 'affinity' required for cpu_affinity\n"
+                        "option\n\n"
+                        "Install by running:\n\n"
+                        " pip install affinity\n\n"
+                    )
+                logger.info("Setting CPU affinity to %s" % cpu_affinity)
+                affinity.set_process_affinity_mask(0, cpu_affinity)
+            change_resource_limit(soft_file_limit)
+            if gc_thresholds:
+                gc.set_threshold(*gc_thresholds)
+            reactor.run()
+
+    if daemonize:
+        daemon = Daemonize(
+            app=appname,
+            pid=pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+def quit_with_error(error_string):
+    message_lines = error_string.split("\n")
+    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
+    sys.stderr.write("*" * line_length + '\n')
+    for line in message_lines:
+        sys.stderr.write(" %s\n" % (line.rstrip(),))
+    sys.stderr.write("*" * line_length + '\n')
+    sys.exit(1)
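start_reactor is the shared entry point the per-app workers delegate to. A hedged sketch of calling it directly, with placeholder values rather than a real worker config:

    import logging
    from synapse.app import _base

    logger = logging.getLogger("synapse.app.demo_worker")  # hypothetical worker name
    _base.start_reactor(
        "synapse-demo-worker",        # appname sent to syslog
        soft_file_limit=0,
        gc_thresholds=None,
        pid_file="/tmp/demo-worker.pid",
        daemonize=False,              # run in the foreground
        cpu_affinity=None,
        logger=logger,
    )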
synapse/app/appservice.py (new file, 188 lines)
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import sys
+
+import synapse
+from synapse import events
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.http.site import SynapseSite
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.directory import DirectoryStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.manhole import manhole
+from synapse.util.versionstring import get_version_string
+from twisted.internet import reactor
+from twisted.web.resource import Resource
+
+logger = logging.getLogger("synapse.app.appservice")
+
+
+class AppserviceSlaveStore(
+    DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+):
+    pass
+
+
+class AppserviceServer(HomeServer):
+    def get_db_conn(self, run_new_connection=True):
+        # Any param beginning with cp_ is a parameter for adbapi, and should
+        # not be passed to the database engine.
+        db_params = {
+            k: v for k, v in self.db_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+        db_conn = self.database_engine.module.connect(**db_params)
+
+        if run_new_connection:
+            self.database_engine.on_new_connection(db_conn)
+        return db_conn
+
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+
+        root_resource = create_resource_tree(resources, Resource())
+
+        for address in bind_addresses:
+            reactor.listenTCP(
+                port,
+                SynapseSite(
+                    "synapse.access.http.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                ),
+                interface=address
+            )
+
+        logger.info("Synapse appservice now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                bind_addresses = listener["bind_addresses"]
+
+                for address in bind_addresses:
+                    reactor.listenTCP(
+                        listener["port"],
+                        manhole(
+                            username="matrix",
+                            password="rabbithole",
+                            globals={"hs": self},
+                        ),
+                        interface=address
+                    )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ASReplicationHandler(self)
+
+
+class ASReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(ASReplicationHandler, self).__init__(hs.get_datastore())
+        self.appservice_handler = hs.get_application_service_handler()
+
+    def on_rdata(self, stream_name, token, rows):
+        super(ASReplicationHandler, self).on_rdata(stream_name, token, rows)
+
+        if stream_name == "events":
+            max_stream_id = self.store.get_room_max_stream_ordering()
+            preserve_fn(
+                self.appservice_handler.notify_interested_services
+            )(max_stream_id)
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse appservice", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.appservice"
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    if config.notify_appservices:
+        sys.stderr.write(
+            "\nThe appservices must be disabled in the main synapse process"
+            "\nbefore they can be run in a separate worker."
+            "\nPlease add ``notify_appservices: false`` to the main config"
+            "\n"
+        )
+        sys.exit(1)
+
+    # Force the pushers to start since they will be disabled in the main config
+    config.notify_appservices = True
+
+    ps = AppserviceServer(
+        config.server_name,
+        db_config=config.database_config,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ps.setup()
+    ps.start_listening(config.worker_listeners)
+
+    def start():
+        ps.get_datastore().start_profiling()
+        ps.get_state_handler().start_caching()
+
+    reactor.callWhenRunning(start)
+
+    _base.start_worker_reactor("synapse-appservice", config)
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])
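The worker's start() function just hands sys.argv[1:] to HomeServerConfig.load_config, so it can also be driven programmatically. A hedged sketch, assuming the usual -c/--config-path flag and with placeholder config paths:

    # Hypothetical invocation; the yaml paths are placeholders.
    from synapse.util.logcontext import LoggingContext
    from synapse.app import appservice

    with LoggingContext("main"):
        appservice.start(["-c", "homeserver.yaml", "-c", "appservice_worker.yaml"])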
189
synapse/app/client_reader.py
Normal file
189
synapse/app/client_reader.py
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2016 OpenMarket Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import logging
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import synapse
|
||||||
|
from synapse import events
|
||||||
|
from synapse.app import _base
|
||||||
|
from synapse.config._base import ConfigError
|
||||||
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
|
from synapse.config.logger import setup_logging
|
||||||
|
from synapse.crypto import context_factory
|
||||||
|
from synapse.http.server import JsonResource
|
||||||
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
|
from synapse.replication.slave.storage.directory import DirectoryStore
|
||||||
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
|
from synapse.replication.slave.storage.keys import SlavedKeyStore
|
||||||
|
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||||
|
from synapse.replication.slave.storage.room import RoomStore
|
||||||
|
from synapse.replication.slave.storage.transactions import TransactionStore
|
||||||
|
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||||
|
from synapse.rest.client.v1.room import PublicRoomListRestServlet
|
||||||
|
from synapse.server import HomeServer
|
||||||
|
from synapse.storage.engines import create_engine
|
||||||
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext
|
||||||
|
from synapse.util.manhole import manhole
|
||||||
|
from synapse.util.versionstring import get_version_string
|
||||||
|
from twisted.internet import reactor
|
||||||
|
from twisted.web.resource import Resource
|
||||||
|
|
||||||
|
logger = logging.getLogger("synapse.app.client_reader")
|
||||||
|
|
||||||
|
|
||||||
|
class ClientReaderSlavedStore(
|
||||||
|
SlavedEventStore,
|
||||||
|
SlavedKeyStore,
|
||||||
|
RoomStore,
|
||||||
|
DirectoryStore,
|
||||||
|
SlavedApplicationServiceStore,
|
||||||
|
SlavedRegistrationStore,
|
||||||
|
TransactionStore,
|
||||||
|
SlavedClientIpStore,
|
||||||
|
BaseSlavedStore,
|
||||||
|
):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ClientReaderServer(HomeServer):
|
||||||
|
def get_db_conn(self, run_new_connection=True):
|
||||||
|
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||||
|
# not be passed to the database engine.
|
||||||
|
db_params = {
|
||||||
|
k: v for k, v in self.db_config.get("args", {}).items()
|
||||||
|
if not k.startswith("cp_")
|
||||||
|
}
|
||||||
|
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    PublicRoomListRestServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse client reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.client_reader"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-client-reader", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
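Every worker class in this change opens its own raw database connection outside the adbapi pool, filtering out the pool-only `cp_` arguments before handing the rest to the driver. A minimal, standalone sketch of that filtering, using a made-up args dict rather than a real database config:

# Hypothetical args dict; "cp_min"/"cp_max" are adbapi pool settings and must
# not reach the DB driver, which is exactly what get_db_conn() filters for.
db_args = {"database": "synapse", "user": "synapse", "cp_min": 5, "cp_max": 10}
driver_args = {k: v for k, v in db_args.items() if not k.startswith("cp_")}
print(driver_args)  # {'database': 'synapse', 'user': 'synapse'}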
synapse/app/federation_reader.py (new file, 178 lines)
@@ -0,0 +1,178 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.urls import FEDERATION_PREFIX
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    TransactionStore,
    BaseSlavedStore,
):
    pass


class FederationReaderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse federation reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_reader"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-federation-reader", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
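The federation reader's start_listening(), like every worker above, dispatches on the listener type from the worker config. A cut-down sketch of that dispatch, with a hypothetical listeners list standing in for config.worker_listeners:

listeners = [
    {"type": "http", "port": 8081, "bind_addresses": ["127.0.0.1"],
     "resources": [{"names": ["federation"]}]},
    {"type": "manhole", "port": 9000, "bind_addresses": ["127.0.0.1"]},
]

for listener in listeners:
    if listener["type"] == "http":
        print("serve HTTP on", listener["bind_addresses"], "port", listener["port"])
    elif listener["type"] == "manhole":
        print("open a manhole on", listener["bind_addresses"], "port", listener["port"])
    else:
        print("unrecognized listener type:", listener["type"])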
synapse/app/federation_sender.py (new file, 274 lines)
@@ -0,0 +1,274 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.federation_sender")


class FederationSenderSlaveStore(
    SlavedDeviceInboxStore, TransactionStore, SlavedReceiptsStore, SlavedEventStore,
    SlavedRegistrationStore, SlavedDeviceStore, SlavedPresenceStore,
):
    def __init__(self, db_conn, hs):
        super(FederationSenderSlaveStore, self).__init__(db_conn, hs)

        # We pull out the current federation stream position now so that we
        # always have a known value for the federation position in memory so
        # that we don't have to bounce via a deferred once when we start the
        # replication streams.
        self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)

    def _get_federation_out_pos(self, db_conn):
        sql = (
            "SELECT stream_id FROM federation_stream_position"
            " WHERE type = ?"
        )
        sql = self.database_engine.convert_param_style(sql)

        txn = db_conn.cursor()
        txn.execute(sql, ("federation",))
        rows = txn.fetchall()
        txn.close()

        return rows[0][0] if rows else -1


class FederationSenderServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse federation_sender now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return FederationSenderReplicationHandler(self)


class FederationSenderReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
        self.send_handler = FederationSenderHandler(hs, self)

    def on_rdata(self, stream_name, token, rows):
        super(FederationSenderReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        self.send_handler.process_replication_rows(stream_name, token, rows)

    def get_streams_to_replicate(self):
        args = super(FederationSenderReplicationHandler, self).get_streams_to_replicate()
        args.update(self.send_handler.stream_positions())
        return args


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation sender", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.federation_sender"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.send_federation:
        sys.stderr.write(
            "\nThe send_federation must be disabled in the main synapse process"
            "\nbefore they can be run in a separate worker."
            "\nPlease add ``send_federation: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the pushers to start since they will be disabled in the main config
    config.send_federation = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = FederationSenderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-federation-sender", config)


class FederationSenderHandler(object):
    """Processes the replication stream and forwards the appropriate entries
    to the federation sender.
    """
    def __init__(self, hs, replication_client):
        self.store = hs.get_datastore()
        self.federation_sender = hs.get_federation_sender()
        self.replication_client = replication_client

        self.federation_position = self.store.federation_out_pos_startup
        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")

        self._last_ack = self.federation_position

        self._room_serials = {}
        self._room_typing = {}

    def on_start(self):
        # There may be some events that are persisted but haven't been sent,
        # so send them now.
        self.federation_sender.notify_new_events(
            self.store.get_room_max_stream_ordering()
        )

    def stream_positions(self):
        return {"federation": self.federation_position}

    def process_replication_rows(self, stream_name, token, rows):
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            send_queue.process_rows_for_federation(self.federation_sender, rows)
            preserve_fn(self.update_token)(token)

        # We also need to poke the federation sender when new events happen
        elif stream_name == "events":
            self.federation_sender.notify_new_events(token)

    @defer.inlineCallbacks
    def update_token(self, token):
        self.federation_position = token

        # We linearize here to ensure we don't have races updating the token
        with (yield self._fed_position_linearizer.queue(None)):
            if self._last_ack < self.federation_position:
                yield self.store.update_federation_out_pos(
                    "federation", self.federation_position
                )

                # We ACK this token over replication so that the master can drop
                # its in memory queues
                self.replication_client.send_federation_ack(self.federation_position)
                self._last_ack = self.federation_position


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
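FederationSenderHandler.update_token() above only persists and acks a federation position once it has moved past the last acked token; the real code does this asynchronously under a Linearizer. A simplified, synchronous sketch of that bookkeeping with stand-in store and replication-client objects (not Synapse classes):

class FakeStore(object):
    def update_federation_out_pos(self, typ, pos):
        print("persisted %s position %d" % (typ, pos))

class FakeReplicationClient(object):
    def send_federation_ack(self, pos):
        print("acked position %d" % pos)

store, client = FakeStore(), FakeReplicationClient()
last_ack = 10

def update_token(token):
    global last_ack
    if last_ack < token:
        store.update_federation_out_pos("federation", token)
        client.send_federation_ack(token)
        last_ack = token

update_token(12)  # advances: persists then acks
update_token(12)  # no-op: the position has not moved past the last ack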
synapse/app/frontend_proxy.py (new file, 239 lines)
@@ -0,0 +1,239 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.errors import SynapseError
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.server import JsonResource
from synapse.http.servlet import (
    RestServlet, parse_json_object_from_request,
)
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.frontend_proxy")


class KeyUploadServlet(RestServlet):
    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$",
                                  releases=())

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(KeyUploadServlet, self).__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    @defer.inlineCallbacks
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if (requester.device_id is not None and
                    device_id != requester.device_id):
                logger.warning("Client uploading keys for a different device "
                               "(logged in as %s, uploading for %s)",
                               requester.device_id, device_id)
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400,
                "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.
            result = yield self.http_client.post_json_get_json(
                self.main_uri + request.uri,
                body,
            )

            defer.returnValue((200, result))
        else:
            # Just interested in counts.
            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
            defer.returnValue((200, {"one_time_key_counts": result}))


class FrontendProxySlavedStore(
    SlavedDeviceStore,
    SlavedClientIpStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
):
    pass


class FrontendProxyServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    KeyUploadServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse frontend proxy", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.frontend_proxy"

    assert config.worker_main_http_uri is not None

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FrontendProxyServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-frontend-proxy", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
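KeyUploadServlet.on_POST() above answers empty-bodied key queries from the worker's slaved store and forwards real uploads to the main process. A stripped-down sketch of that routing decision; the helper callables are illustrative stand-ins, not Synapse APIs:

def route_key_upload(body, proxy_to_main, count_local_keys):
    if body:
        # an actual upload: proxy the JSON to the main synapse process
        return proxy_to_main(body)
    # nothing to upload: just report one-time-key counts from the replica
    return {"one_time_key_counts": count_local_keys()}

print(route_key_upload({}, lambda b: "proxied upload", lambda: {"signed_curve25519": 50}))
print(route_key_upload({"one_time_keys": {}}, lambda b: "proxied upload", lambda: {}))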
synapse/app/homeserver.py
@@ -13,58 +13,49 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import synapse
-
 import gc
 import logging
 import os
 import sys
+
+import synapse
+import synapse.config.logger
+from synapse import events
+from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \
+    LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \
+    STATIC_PREFIX, WEB_CLIENT_PREFIX
+from synapse.app import _base
+from synapse.app._base import quit_with_error
 from synapse.config._base import ConfigError
-from synapse.python_dependencies import (
-    check_requirements, DEPENDENCY_LINKS
-)
-from synapse.rest import ClientRestResource
-from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
-from synapse.storage import are_all_users_on_domain
-from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
-from synapse.server import HomeServer
-from twisted.internet import reactor, task, defer
-from twisted.application import service
-from twisted.web.resource import Resource, EncodingResourceWrapper
-from twisted.web.static import File
-from twisted.web.server import GzipEncoderFactory
-from synapse.http.server import RootRedirect
-from synapse.rest.media.v0.content_repository import ContentRepoResource
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
-from synapse.rest.key.v1.server_key_resource import LocalKey
-from synapse.rest.key.v2 import KeyApiV2Resource
-from synapse.api.urls import (
-    FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
-    SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
-    SERVER_KEY_V2_PREFIX,
-)
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
-from synapse.util.logcontext import LoggingContext
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
-from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
 from synapse.federation.transport.server import TransportLayerServer
+from synapse.http.server import RootRedirect
+from synapse.http.site import SynapseSite
+from synapse.metrics import register_memory_metrics
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
+    check_requirements
+from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
+from synapse.rest import ClientRestResource
+from synapse.rest.key.v1.server_key_resource import LocalKey
+from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.rest.media.v1.media_repository import MediaRepositoryResource
+from synapse.server import HomeServer
+from synapse.storage import are_all_users_on_domain
+from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
+from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
-from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.manhole import manhole
-from synapse.http.site import SynapseSite
-from synapse import events
-
-from daemonize import Daemonize
+from twisted.application import service
+from twisted.internet import defer, reactor
+from twisted.web.resource import EncodingResourceWrapper, Resource
+from twisted.web.server import GzipEncoderFactory
+from twisted.web.static import File

 logger = logging.getLogger("synapse.app.homeserver")

@@ -89,7 +80,7 @@ def build_resource_for_web_client(hs):
                 "\n"
                 "You can also disable hosting of the webclient via the\n"
                 "configuration option `web_client`\n"
-                % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
+                % {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
             )
         syweb_path = os.path.dirname(syweb.__file__)
         webclient_path = os.path.join(syweb_path, "webclient")

@@ -106,7 +97,7 @@ def build_resource_for_web_client(hs):
 class SynapseHomeServer(HomeServer):
     def _listener_http(self, config, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         tls = listener_config.get("tls", False)
         site_tag = listener_config.get("tag", port)

@@ -163,16 +154,15 @@ class SynapseHomeServer(HomeServer):
                 if name == "metrics" and self.get_config().enable_metrics:
                     resources[METRICS_PREFIX] = MetricsResource(self)

-                if name == "replication":
-                    resources[REPLICATION_PREFIX] = ReplicationResource(self)
-
         if WEB_CLIENT_PREFIX in resources:
             root_resource = RootRedirect(WEB_CLIENT_PREFIX)
         else:
             root_resource = Resource()

         root_resource = create_resource_tree(resources, root_resource)
+
         if tls:
+            for address in bind_addresses:
             reactor.listenSSL(
                 port,
                 SynapseSite(

@@ -182,9 +172,10 @@ class SynapseHomeServer(HomeServer):
                     root_resource,
                 ),
                 self.tls_server_context_factory,
-                interface=bind_address
+                interface=address
             )
         else:
+            for address in bind_addresses:
             reactor.listenTCP(
                 port,
                 SynapseSite(

@@ -193,7 +184,7 @@ class SynapseHomeServer(HomeServer):
                     listener_config,
                     root_resource,
                 ),
-                interface=bind_address
+                interface=address
             )
         logger.info("Synapse now listening on port %d", port)

@@ -204,6 +195,9 @@ class SynapseHomeServer(HomeServer):
             if listener["type"] == "http":
                 self._listener_http(config, listener)
             elif listener["type"] == "manhole":
+                bind_addresses = listener["bind_addresses"]
+
+                for address in bind_addresses:
                 reactor.listenTCP(
                     listener["port"],
                     manhole(

@@ -211,7 +205,17 @@ class SynapseHomeServer(HomeServer):
                         password="rabbithole",
                         globals={"hs": self},
                     ),
-                    interface=listener.get("bind_address", '127.0.0.1')
+                    interface=address
+                )
+            elif listener["type"] == "replication":
+                bind_addresses = listener["bind_addresses"]
+                for address in bind_addresses:
+                    factory = ReplicationStreamProtocolFactory(self)
+                    server_listener = reactor.listenTCP(
+                        listener["port"], factory, interface=address
+                    )
+                    reactor.addSystemEventTrigger(
+                        "before", "shutdown", server_listener.stopListening,
                 )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

@@ -246,16 +250,6 @@ class SynapseHomeServer(HomeServer):
         return db_conn


-def quit_with_error(error_string):
-    message_lines = error_string.split("\n")
-    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
-    sys.stderr.write("*" * line_length + '\n')
-    for line in message_lines:
-        sys.stderr.write(" %s\n" % (line.rstrip(),))
-    sys.stderr.write("*" * line_length + '\n')
-    sys.exit(1)
-
-
 def setup(config_options):
     """
     Args:

@@ -279,12 +273,12 @@ def setup(config_options):
         # generating config files and shouldn't try to continue.
         sys.exit(0)

-    config.setup_logging()
+    synapse.config.logger.setup_logging(config, use_worker_options=False)

     # check any extra requirements we have now we have a config
     check_requirements(config)

-    version_string = get_version_string("Synapse", synapse)
+    version_string = "Synapse/" + get_version_string(synapse)

     logger.info("Server hostname: %s", config.server_name)
     logger.info("Server version: %s", version_string)

@@ -335,6 +329,8 @@ def setup(config_options):
         hs.get_datastore().start_doing_background_updates()
         hs.get_replication_layer().start_get_pdu_cache()

+        register_memory_metrics(hs)
+
     reactor.callWhenRunning(start)

     return hs

@@ -380,7 +376,10 @@ def run(hs):
         ThreadPool._worker = profile(ThreadPool._worker)
         reactor.run = profile(reactor.run)

-    start_time = hs.get_clock().time()
+    clock = hs.get_clock()
+    start_time = clock.time()
+
+    stats = {}

     @defer.inlineCallbacks
     def phone_stats_home():

@@ -390,19 +389,23 @@ def run(hs):
         if uptime < 0:
             uptime = 0

-        stats = {}
         stats["homeserver"] = hs.config.server_name
         stats["timestamp"] = now
         stats["uptime_seconds"] = uptime
         stats["total_users"] = yield hs.get_datastore().count_all_users()
+
+        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
+        stats["total_nonbridged_users"] = total_nonbridged_users
+
         room_count = yield hs.get_datastore().get_room_count()
         stats["total_room_count"] = room_count

         stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
-        daily_messages = yield hs.get_datastore().count_daily_messages()
-        if daily_messages is not None:
-            stats["daily_messages"] = daily_messages
+        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
+        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
+
+        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+        stats["daily_sent_messages"] = daily_sent_messages

         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:

@@ -414,37 +417,26 @@ def run(hs):
             logger.warn("Error reporting stats: %s", e)

     if hs.config.report_stats:
-        phone_home_task = task.LoopingCall(phone_stats_home)
-        logger.info("Scheduling stats reporting for 24 hour intervals")
-        phone_home_task.start(60 * 60 * 24, now=False)
-
-    def in_thread():
-        # Uncomment to enable tracing of log context changes.
-        # sys.settrace(logcontext_tracer)
-        with LoggingContext("run"):
-            change_resource_limit(hs.config.soft_file_limit)
-            if hs.config.gc_thresholds:
-                gc.set_threshold(*hs.config.gc_thresholds)
-            reactor.run()
-
-    if hs.config.daemonize:
-
-        if hs.config.print_pidfile:
-            print (hs.config.pid_file)
+        logger.info("Scheduling stats reporting for 3 hour intervals")
+        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

-        daemon = Daemonize(
-            app="synapse-homeserver",
-            pid=hs.config.pid_file,
-            action=lambda: in_thread(),
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
+        # We wait 5 minutes to send the first set of stats as the server can
+        # be quite busy the first few minutes
+        clock.call_later(5 * 60, phone_stats_home)

-        daemon.start()
-    else:
-        in_thread()
+    if hs.config.daemonize and hs.config.print_pidfile:
+        print (hs.config.pid_file)

+    _base.start_reactor(
+        "synapse-homeserver",
+        hs.config.soft_file_limit,
+        hs.config.gc_thresholds,
+        hs.config.pid_file,
+        hs.config.daemonize,
+        hs.config.cpu_affinity,
+        logger,
+    )

 def main():
     with LoggingContext("main"):
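A recurring change in this compare view is that listeners move from a single optional bind_address to a mandatory bind_addresses list, with every listen call wrapped in a loop over the addresses. A tiny sketch of the new shape, using a made-up listener dict:

listener = {"port": 8008, "bind_addresses": ["127.0.0.1", "::1"]}

for address in listener["bind_addresses"]:
    # the real code calls reactor.listenTCP(port, site, interface=address)
    print("would listen on %s port %d" % (address, listener["port"]))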
synapse/app/media_repository.py (new file, 186 lines)
@@ -0,0 +1,186 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys

import synapse
from synapse import events
from synapse.api.urls import (
    CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX
)
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.crypto import context_factory
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource

logger = logging.getLogger("synapse.app.media_repository")


class MediaRepositorySlavedStore(
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedClientIpStore,
    TransactionStore,
    BaseSlavedStore,
    MediaRepositoryStore,
):
    pass


class MediaRepositoryServer(HomeServer):
    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "media":
                    media_repo = MediaRepositoryResource(self)
                    resources.update({
                        MEDIA_PREFIX: media_repo,
                        LEGACY_MEDIA_PREFIX: media_repo,
                        CONTENT_REPO_PREFIX: ContentRepoResource(
                            self, self.config.uploads_path
                        ),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse media repository now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return ReplicationClientHandler(self.get_datastore())


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse media repository", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.media_repository"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = MediaRepositoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def start():
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-media-repository", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
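Each worker's datastore above is composed by mixing several slaved store classes into one class, so methods resolve through normal Python MRO. A toy illustration of the same pattern with fake stores (not Synapse classes):

class SlavedFooStore(object):
    def get_foo(self):
        return "foo from the replica"

class SlavedBarStore(object):
    def get_bar(self):
        return "bar from the replica"

class ExampleWorkerStore(SlavedFooStore, SlavedBarStore):
    pass

store = ExampleWorkerStore()
print(store.get_foo(), "/", store.get_bar())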
synapse/app/pusher.py
@@ -13,38 +13,33 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import sys
+
 import synapse
+from synapse import events
-from synapse.server import HomeServer
+from synapse.app import _base
 from synapse.config._base import ConfigError
-from synapse.config.logger import setup_logging
 from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
-from synapse.storage.roommember import RoomMemberStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.pushers import SlavedPusherStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
-from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.tcp.client import ReplicationClientHandler
-from synapse.storage.engines import create_engine
+from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.util.async import sleep
+from synapse.storage.engines import create_engine
+from synapse.storage.roommember import RoomMemberStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
+from twisted.internet import defer, reactor
-from twisted.internet import reactor, defer
 from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import gc

 logger = logging.getLogger("synapse.app.pusher")

@@ -80,18 +75,12 @@ class PusherSlaveStore(
         DataStore.get_profile_displayname.__func__
     )

-    # XXX: This is a bit broken because we don't persist forgotten rooms
-    # in a way that they can be streamed. This means that we don't have a
-    # way to invalidate the forgotten rooms cache correctly.
-    # For now we expire the cache every 10 minutes.
-    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
     who_forgot_in_room = (
         RoomMemberStore.__dict__["who_forgot_in_room"]
    )


 class PusherServer(HomeServer):
-
     def get_db_conn(self, run_new_connection=True):
         # Any param beginning with cp_ is a parameter for adbapi, and should
         # not be passed to the database engine.

@@ -111,20 +100,11 @@ class PusherServer(HomeServer):
         logger.info("Finished setting up.")

     def remove_pusher(self, app_id, push_key, user_id):
-        http_client = self.get_simple_http_client()
-        replication_url = self.config.worker_replication_url
-        url = replication_url + "/remove_pushers"
-        return http_client.post_json_get_json(url, {
-            "remove": [{
-                "app_id": app_id,
-                "push_key": push_key,
-                "user_id": user_id,
-            }]
-        })
+        self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:

@@ -133,6 +113,8 @@ class PusherServer(HomeServer):
                     resources[METRICS_PREFIX] = MetricsResource(self)

         root_resource = create_resource_tree(resources, Resource())
+
+        for address in bind_addresses:
             reactor.listenTCP(
                 port,
                 SynapseSite(

@@ -141,8 +123,9 @@ class PusherServer(HomeServer):
                     listener_config,
                     root_resource,
                 ),
-                interface=bind_address
+                interface=address
             )

         logger.info("Synapse pusher now listening on port %d", port)

     def start_listening(self, listeners):

@@ -150,6 +133,9 @@ class PusherServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
+                bind_addresses = listener["bind_addresses"]
+
+                for address in bind_addresses:
                 reactor.listenTCP(
                     listener["port"],
                     manhole(

@@ -157,89 +143,57 @@ class PusherServer(HomeServer):
                         password="rabbithole",
                         globals={"hs": self},
                     ),
-                    interface=listener.get("bind_address", '127.0.0.1')
interface=listener.get("bind_address", '127.0.0.1')
|
interface=address
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
logger.warn("Unrecognized listener type: %s", listener["type"])
|
logger.warn("Unrecognized listener type: %s", listener["type"])
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
self.get_tcp_replication().start_replication(self)
|
||||||
def replicate(self):
|
|
||||||
http_client = self.get_simple_http_client()
|
|
||||||
store = self.get_datastore()
|
|
||||||
replication_url = self.config.worker_replication_url
|
|
||||||
pusher_pool = self.get_pusherpool()
|
|
||||||
clock = self.get_clock()
|
|
||||||
|
|
||||||
def stop_pusher(user_id, app_id, pushkey):
|
def build_tcp_replication(self):
|
||||||
|
return PusherReplicationHandler(self)
|
||||||
|
|
||||||
|
|
||||||
|
class PusherReplicationHandler(ReplicationClientHandler):
|
||||||
|
def __init__(self, hs):
|
||||||
|
super(PusherReplicationHandler, self).__init__(hs.get_datastore())
|
||||||
|
|
||||||
|
self.pusher_pool = hs.get_pusherpool()
|
||||||
|
|
||||||
|
def on_rdata(self, stream_name, token, rows):
|
||||||
|
super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
|
||||||
|
preserve_fn(self.poke_pushers)(stream_name, token, rows)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def poke_pushers(self, stream_name, token, rows):
|
||||||
|
if stream_name == "pushers":
|
||||||
|
for row in rows:
|
||||||
|
if row.deleted:
|
||||||
|
yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
|
||||||
|
else:
|
||||||
|
yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
|
||||||
|
elif stream_name == "events":
|
||||||
|
yield self.pusher_pool.on_new_notifications(
|
||||||
|
token, token,
|
||||||
|
)
|
||||||
|
elif stream_name == "receipts":
|
||||||
|
yield self.pusher_pool.on_new_receipts(
|
||||||
|
token, token, set(row.room_id for row in rows)
|
||||||
|
)
|
||||||
|
|
||||||
|
def stop_pusher(self, user_id, app_id, pushkey):
|
||||||
key = "%s:%s" % (app_id, pushkey)
|
key = "%s:%s" % (app_id, pushkey)
|
||||||
pushers_for_user = pusher_pool.pushers.get(user_id, {})
|
pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
|
||||||
pusher = pushers_for_user.pop(key, None)
|
pusher = pushers_for_user.pop(key, None)
|
||||||
if pusher is None:
|
if pusher is None:
|
||||||
return
|
return
|
||||||
logger.info("Stopping pusher %r / %r", user_id, key)
|
logger.info("Stopping pusher %r / %r", user_id, key)
|
||||||
pusher.on_stop()
|
pusher.on_stop()
|
||||||
|
|
||||||
def start_pusher(user_id, app_id, pushkey):
|
def start_pusher(self, user_id, app_id, pushkey):
|
||||||
key = "%s:%s" % (app_id, pushkey)
|
key = "%s:%s" % (app_id, pushkey)
|
||||||
logger.info("Starting pusher %r / %r", user_id, key)
|
logger.info("Starting pusher %r / %r", user_id, key)
|
||||||
return pusher_pool._refresh_pusher(app_id, pushkey, user_id)
|
return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def poke_pushers(results):
|
|
||||||
pushers_rows = set(
|
|
||||||
map(tuple, results.get("pushers", {}).get("rows", []))
|
|
||||||
)
|
|
||||||
deleted_pushers_rows = set(
|
|
||||||
map(tuple, results.get("deleted_pushers", {}).get("rows", []))
|
|
||||||
)
|
|
||||||
for row in sorted(pushers_rows | deleted_pushers_rows):
|
|
||||||
if row in deleted_pushers_rows:
|
|
||||||
user_id, app_id, pushkey = row[1:4]
|
|
||||||
stop_pusher(user_id, app_id, pushkey)
|
|
||||||
elif row in pushers_rows:
|
|
||||||
user_id = row[1]
|
|
||||||
app_id = row[5]
|
|
||||||
pushkey = row[8]
|
|
||||||
yield start_pusher(user_id, app_id, pushkey)
|
|
||||||
|
|
||||||
stream = results.get("events")
|
|
||||||
if stream:
|
|
||||||
min_stream_id = stream["rows"][0][0]
|
|
||||||
max_stream_id = stream["position"]
|
|
||||||
preserve_fn(pusher_pool.on_new_notifications)(
|
|
||||||
min_stream_id, max_stream_id
|
|
||||||
)
|
|
||||||
|
|
||||||
stream = results.get("receipts")
|
|
||||||
if stream:
|
|
||||||
rows = stream["rows"]
|
|
||||||
affected_room_ids = set(row[1] for row in rows)
|
|
||||||
min_stream_id = rows[0][0]
|
|
||||||
max_stream_id = stream["position"]
|
|
||||||
preserve_fn(pusher_pool.on_new_receipts)(
|
|
||||||
min_stream_id, max_stream_id, affected_room_ids
|
|
||||||
)
|
|
||||||
|
|
||||||
def expire_broken_caches():
|
|
||||||
store.who_forgot_in_room.invalidate_all()
|
|
||||||
|
|
||||||
next_expire_broken_caches_ms = 0
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
args = store.stream_positions()
|
|
||||||
args["timeout"] = 30000
|
|
||||||
result = yield http_client.get_json(replication_url, args=args)
|
|
||||||
now_ms = clock.time_msec()
|
|
||||||
if now_ms > next_expire_broken_caches_ms:
|
|
||||||
expire_broken_caches()
|
|
||||||
next_expire_broken_caches_ms = (
|
|
||||||
now_ms + store.BROKEN_CACHE_EXPIRY_MS
|
|
||||||
)
|
|
||||||
yield store.process_replication(result)
|
|
||||||
poke_pushers(result)
|
|
||||||
except:
|
|
||||||
logger.exception("Error replicating from %r", replication_url)
|
|
||||||
yield sleep(30)
|
|
||||||
|
|
||||||
|
|
||||||
def start(config_options):
|
def start(config_options):
|
||||||
@@ -253,7 +207,9 @@ def start(config_options):
|
|||||||
|
|
||||||
assert config.worker_app == "synapse.app.pusher"
|
assert config.worker_app == "synapse.app.pusher"
|
||||||
|
|
||||||
setup_logging(config.worker_log_config, config.worker_log_file)
|
setup_logging(config, use_worker_options=True)
|
||||||
|
|
||||||
|
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||||
|
|
||||||
if config.start_pushers:
|
if config.start_pushers:
|
||||||
sys.stderr.write(
|
sys.stderr.write(
|
||||||
@@ -273,40 +229,21 @@ def start(config_options):
|
|||||||
config.server_name,
|
config.server_name,
|
||||||
db_config=config.database_config,
|
db_config=config.database_config,
|
||||||
config=config,
|
config=config,
|
||||||
version_string=get_version_string("Synapse", synapse),
|
version_string="Synapse/" + get_version_string(synapse),
|
||||||
database_engine=database_engine,
|
database_engine=database_engine,
|
||||||
)
|
)
|
||||||
|
|
||||||
ps.setup()
|
ps.setup()
|
||||||
ps.start_listening(config.worker_listeners)
|
ps.start_listening(config.worker_listeners)
|
||||||
|
|
||||||
def run():
|
|
||||||
with LoggingContext("run"):
|
|
||||||
logger.info("Running")
|
|
||||||
change_resource_limit(config.soft_file_limit)
|
|
||||||
if config.gc_thresholds:
|
|
||||||
gc.set_threshold(*config.gc_thresholds)
|
|
||||||
reactor.run()
|
|
||||||
|
|
||||||
def start():
|
def start():
|
||||||
ps.replicate()
|
|
||||||
ps.get_pusherpool().start()
|
ps.get_pusherpool().start()
|
||||||
ps.get_datastore().start_profiling()
|
ps.get_datastore().start_profiling()
|
||||||
|
ps.get_state_handler().start_caching()
|
||||||
|
|
||||||
reactor.callWhenRunning(start)
|
reactor.callWhenRunning(start)
|
||||||
|
|
||||||
if config.worker_daemonize:
|
_base.start_worker_reactor("synapse-pusher", config)
|
||||||
daemon = Daemonize(
|
|
||||||
app="synapse-pusher",
|
|
||||||
pid=config.worker_pid_file,
|
|
||||||
action=run,
|
|
||||||
auto_close_fds=False,
|
|
||||||
verbose=True,
|
|
||||||
logger=logger,
|
|
||||||
)
|
|
||||||
daemon.start()
|
|
||||||
else:
|
|
||||||
run()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
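For context, the new PusherReplicationHandler above consumes typed rows pushed over the TCP replication connection and routes them to pusher-pool actions, instead of polling an HTTP endpoint. A minimal, self-contained sketch of that dispatch pattern follows; PusherRow and FakePusherPool are illustrative stand-ins, not Synapse APIs.

from collections import namedtuple

PusherRow = namedtuple("PusherRow", ["user_id", "app_id", "pushkey", "deleted"])

class FakePusherPool(object):
    def start_pusher(self, user_id, app_id, pushkey):
        print("start %s/%s for %s" % (app_id, pushkey, user_id))

    def stop_pusher(self, user_id, app_id, pushkey):
        print("stop %s/%s for %s" % (app_id, pushkey, user_id))

def poke_pushers(pool, stream_name, token, rows):
    # Route each replicated row to the right pusher-pool action, as on_rdata does above.
    if stream_name == "pushers":
        for row in rows:
            if row.deleted:
                pool.stop_pusher(row.user_id, row.app_id, row.pushkey)
            else:
                pool.start_pusher(row.user_id, row.app_id, row.pushkey)

poke_pushers(FakePusherPool(), "pushers", token=7,
             rows=[PusherRow("@alice:example.org", "m.http", "key1", False)])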
@@ -13,52 +13,51 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import contextlib
+import logging
+import sys

 import synapse
-from synapse.api.constants import EventTypes, PresenceState
+from synapse.api.constants import EventTypes
+from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
-from synapse.events import FrozenEvent
-from synapse.handlers.presence import PresenceHandler
+from synapse.handlers.presence import PresenceHandler, get_interested_parties
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
-from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.filtering import SlavedFilteringStore
 from synapse.replication.slave.storage.presence import SlavedPresenceStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1 import events
+from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
+from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
 from synapse.rest.client.v2_alpha import sync
 from synapse.server import HomeServer
-from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
-from synapse.storage.presence import PresenceStore, UserPresenceState
+from synapse.storage.presence import UserPresenceState
 from synapse.storage.roommember import RoomMemberStore
-from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
-from synapse.util.rlimit import change_resource_limit
 from synapse.util.stringutils import random_string
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor, defer
+from twisted.internet import defer, reactor
 from twisted.web.resource import Resource
-
-from daemonize import Daemonize
-
-import sys
-import logging
-import contextlib
-import gc
-import ujson as json

 logger = logging.getLogger("synapse.app.synchrotron")
@@ -71,35 +70,34 @@ class SynchrotronSlavedStore(
     SlavedRegistrationStore,
     SlavedFilteringStore,
     SlavedPresenceStore,
+    SlavedGroupServerStore,
+    SlavedDeviceInboxStore,
+    SlavedDeviceStore,
+    SlavedClientIpStore,
+    RoomStore,
     BaseSlavedStore,
-    ClientIpStore,  # After BaseSlavedStore because the constructor is different
 ):
-    # XXX: This is a bit broken because we don't persist forgotten rooms
-    # in a way that they can be streamed. This means that we don't have a
-    # way to invalidate the forgotten rooms cache correctly.
-    # For now we expire the cache every 10 minutes.
-    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
     who_forgot_in_room = (
         RoomMemberStore.__dict__["who_forgot_in_room"]
     )

-    # XXX: This is a bit broken because we don't persist the accepted list in a
-    # way that can be replicated. This means that we don't have a way to
-    # invalidate the cache correctly.
-    get_presence_list_accepted = PresenceStore.__dict__[
-        "get_presence_list_accepted"
-    ]
+    did_forget = (
+        RoomMemberStore.__dict__["did_forget"]
+    )

 UPDATE_SYNCING_USERS_MS = 10 * 1000


 class SynchrotronPresence(object):
     def __init__(self, hs):
+        self.hs = hs
+        self.is_mine_id = hs.is_mine_id
         self.http_client = hs.get_simple_http_client()
         self.store = hs.get_datastore()
         self.user_to_num_current_syncs = {}
-        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
         self.clock = hs.get_clock()
+        self.notifier = hs.get_notifier()

         active_presence = self.store.take_presence_startup_info()
         self.user_to_current_state = {

@@ -107,34 +105,69 @@ class SynchrotronPresence(object):
             for state in active_presence
         }

+        # user_id -> last_sync_ms. Lists the users that have stopped syncing
+        # but we haven't notified the master of that yet
+        self.users_going_offline = {}
+
+        self._send_stop_syncing_loop = self.clock.looping_call(
+            self.send_stop_syncing, 10 * 1000
+        )
+
         self.process_id = random_string(16)
         logger.info("Presence process_id is %r", self.process_id)

-        self._sending_sync = False
-        self._need_to_send_sync = False
-        self.clock.looping_call(
-            self._send_syncing_users_regularly,
-            UPDATE_SYNCING_USERS_MS,
-        )
-
-        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
-
-    def set_state(self, user, state):
+    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+        self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)
+
+    def mark_as_coming_online(self, user_id):
+        """A user has started syncing. Send a UserSync to the master, unless they
+        had recently stopped syncing.
+
+        Args:
+            user_id (str)
+        """
+        going_offline = self.users_going_offline.pop(user_id, None)
+        if not going_offline:
+            # Safe to skip because we haven't yet told the master they were offline
+            self.send_user_sync(user_id, True, self.clock.time_msec())
+
+    def mark_as_going_offline(self, user_id):
+        """A user has stopped syncing. We wait before notifying the master as
+        its likely they'll come back soon. This allows us to avoid sending
+        a stopped syncing immediately followed by a started syncing notification
+        to the master
+
+        Args:
+            user_id (str)
+        """
+        self.users_going_offline[user_id] = self.clock.time_msec()
+
+    def send_stop_syncing(self):
+        """Check if there are any users who have stopped syncing a while ago
+        and haven't come back yet. If there are poke the master about them.
+        """
+        now = self.clock.time_msec()
+        for user_id, last_sync_ms in self.users_going_offline.items():
+            if now - last_sync_ms > 10 * 1000:
+                self.users_going_offline.pop(user_id, None)
+                self.send_user_sync(user_id, False, last_sync_ms)
+
+    def set_state(self, user, state, ignore_status_msg=False):
         # TODO Hows this supposed to work?
         pass

     get_states = PresenceHandler.get_states.__func__
+    get_state = PresenceHandler.get_state.__func__
     current_state_for_users = PresenceHandler.current_state_for_users.__func__

-    @defer.inlineCallbacks
     def user_syncing(self, user_id, affect_presence):
         if affect_presence:
             curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
             self.user_to_num_current_syncs[user_id] = curr_sync + 1
-            prev_states = yield self.current_state_for_users([user_id])
-            if prev_states[user_id].state == PresenceState.OFFLINE:
-                # TODO: Don't block the sync request on this HTTP hit.
-                yield self._send_syncing_users_now()
+
+            # If we went from no in flight sync to some, notify replication
+            if self.user_to_num_current_syncs[user_id] == 1:
+                self.mark_as_coming_online(user_id)

         def _end():
             # We check that the user_id is in user_to_num_current_syncs because

@@ -143,6 +176,10 @@ class SynchrotronPresence(object):
             if affect_presence and user_id in self.user_to_num_current_syncs:
                 self.user_to_num_current_syncs[user_id] -= 1

+                # If we went from one in flight sync to non, notify replication
+                if self.user_to_num_current_syncs[user_id] == 0:
+                    self.mark_as_going_offline(user_id)
+
         @contextlib.contextmanager
         def _user_syncing():
             try:

@@ -150,64 +187,38 @@ class SynchrotronPresence(object):
             finally:
                 _end()

-        defer.returnValue(_user_syncing())
+        return defer.succeed(_user_syncing())

     @defer.inlineCallbacks
-    def _on_shutdown(self):
-        # When the synchrotron is shutdown tell the master to clear the in
-        # progress syncs for this process
-        self.user_to_num_current_syncs.clear()
-        yield self._send_syncing_users_now()
-
-    def _send_syncing_users_regularly(self):
-        # Only send an update if we aren't in the middle of sending one.
-        if not self._sending_sync:
-            preserve_fn(self._send_syncing_users_now)()
-
-    @defer.inlineCallbacks
-    def _send_syncing_users_now(self):
-        if self._sending_sync:
-            # We don't want to race with sending another update.
-            # Instead we wait for that update to finish and send another
-            # update afterwards.
-            self._need_to_send_sync = True
-            return
-
-        # Flag that we are sending an update.
-        self._sending_sync = True
-
-        yield self.http_client.post_json_get_json(self.syncing_users_url, {
-            "process_id": self.process_id,
-            "syncing_users": [
-                user_id for user_id, count in self.user_to_num_current_syncs.items()
-                if count > 0
-            ],
-        })
-
-        # Unset the flag as we are no longer sending an update.
-        self._sending_sync = False
-        if self._need_to_send_sync:
-            # If something happened while we were sending the update then
-            # we might need to send another update.
-            # TODO: Check if the update that was sent matches the current state
-            # as we only need to send an update if they are different.
-            self._need_to_send_sync = False
-            yield self._send_syncing_users_now()
-
-    def process_replication(self, result):
-        stream = result.get("presence", {"rows": []})
-        for row in stream["rows"]:
-            (
-                position, user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            ) = row
-            self.user_to_current_state[user_id] = UserPresenceState(
-                user_id, state, last_active_ts,
-                last_federation_update_ts, last_user_sync_ts, status_msg,
-                currently_active
-            )
+    def notify_from_replication(self, states, stream_id):
+        parties = yield get_interested_parties(self.store, states)
+        room_ids_to_states, users_to_states = parties
+
+        self.notifier.on_new_event(
+            "presence_key", stream_id, rooms=room_ids_to_states.keys(),
+            users=users_to_states.keys()
+        )
+
+    @defer.inlineCallbacks
+    def process_replication_rows(self, token, rows):
+        states = [UserPresenceState(
+            row.user_id, row.state, row.last_active_ts,
+            row.last_federation_update_ts, row.last_user_sync_ts, row.status_msg,
+            row.currently_active
+        ) for row in rows]
+
+        for state in states:
+            self.user_to_current_state[row.user_id] = state
+
+        stream_id = token
+        yield self.notify_from_replication(states, stream_id)
+
+    def get_currently_syncing_users(self):
+        return [
+            user_id for user_id, count in self.user_to_num_current_syncs.iteritems()
+            if count > 0
+        ]


 class SynchrotronTyping(object):
     def __init__(self, hs):

@@ -216,18 +227,17 @@ class SynchrotronTyping(object):
         self._room_typing = {}

     def stream_positions(self):
+        # We must update this typing token from the response of the previous
+        # sync. In particular, the stream id may "reset" back to zero/a low
+        # value which we *must* use for the next replication request.
         return {"typing": self._latest_room_serial}

-    def process_replication(self, result):
-        stream = result.get("typing")
-        if stream:
-            self._latest_room_serial = int(stream["position"])
-
-            for row in stream["rows"]:
-                position, room_id, typing_json = row
-                typing = json.loads(typing_json)
-                self._room_serials[room_id] = position
-                self._room_typing[room_id] = typing
+    def process_replication_rows(self, token, rows):
+        self._latest_room_serial = token
+
+        for row in rows:
+            self._room_serials[row.room_id] = token
+            self._room_typing[row.room_id] = row.user_ids


 class SynchrotronApplicationService(object):
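The "going offline" bookkeeping added above debounces presence updates: a stopped sync is only reported to the master if the user does not start another sync within a short grace period. A self-contained sketch of that pattern follows; it uses time.time() instead of Synapse's clock, and the callback name is only illustrative.

import time

class OfflineDebouncer(object):
    GRACE_MS = 10 * 1000

    def __init__(self, send_user_sync):
        # send_user_sync(user_id, is_syncing, last_sync_ms) is the caller-supplied callback.
        self.send_user_sync = send_user_sync
        self.users_going_offline = {}

    def mark_as_coming_online(self, user_id):
        if self.users_going_offline.pop(user_id, None) is None:
            # The master still thinks this user is offline, so tell it otherwise.
            self.send_user_sync(user_id, True, int(time.time() * 1000))

    def mark_as_going_offline(self, user_id):
        # Don't notify yet; the user may well start another sync shortly.
        self.users_going_offline[user_id] = int(time.time() * 1000)

    def send_stop_syncing(self):
        # Called periodically: report only users who stayed quiet past the grace period.
        now = int(time.time() * 1000)
        for user_id, last_sync_ms in list(self.users_going_offline.items()):
            if now - last_sync_ms > self.GRACE_MS:
                self.users_going_offline.pop(user_id, None)
                self.send_user_sync(user_id, False, last_sync_ms)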
@@ -256,7 +266,7 @@ class SynchrotronServer(HomeServer):

     def _listen_http(self, listener_config):
         port = listener_config["port"]
-        bind_address = listener_config.get("bind_address", "")
+        bind_addresses = listener_config["bind_addresses"]
         site_tag = listener_config.get("tag", port)
         resources = {}
         for res in listener_config["resources"]:

@@ -266,13 +276,19 @@ class SynchrotronServer(HomeServer):
                 elif name == "client":
                     resource = JsonResource(self, canonical_json=False)
                     sync.register_servlets(self, resource)
+                    events.register_servlets(self, resource)
+                    InitialSyncRestServlet(self).register(resource)
+                    RoomInitialSyncRestServlet(self).register(resource)
                     resources.update({
                         "/_matrix/client/r0": resource,
                         "/_matrix/client/unstable": resource,
                         "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
                     })

         root_resource = create_resource_tree(resources, Resource())
+
+        for address in bind_addresses:
             reactor.listenTCP(
                 port,
                 SynapseSite(

@@ -281,8 +297,9 @@ class SynchrotronServer(HomeServer):
                     listener_config,
                     root_resource,
                 ),
-                interface=bind_address
+                interface=address
             )

         logger.info("Synapse synchrotron now listening on port %d", port)

     def start_listening(self, listeners):

@@ -290,6 +307,9 @@ class SynchrotronServer(HomeServer):
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
+                bind_addresses = listener["bind_addresses"]
+
+                for address in bind_addresses:
                     reactor.listenTCP(
                         listener["port"],
                         manhole(

@@ -297,106 +317,15 @@ class SynchrotronServer(HomeServer):
                             password="rabbithole",
                             globals={"hs": self},
                         ),
-                        interface=listener.get("bind_address", '127.0.0.1')
+                        interface=address
                     )
             else:
                 logger.warn("Unrecognized listener type: %s", listener["type"])

-    @defer.inlineCallbacks
-    def replicate(self):
-        http_client = self.get_simple_http_client()
-        store = self.get_datastore()
-        replication_url = self.config.worker_replication_url
-        clock = self.get_clock()
-        notifier = self.get_notifier()
-        presence_handler = self.get_presence_handler()
-        typing_handler = self.get_typing_handler()
-
-        def expire_broken_caches():
-            store.who_forgot_in_room.invalidate_all()
-            store.get_presence_list_accepted.invalidate_all()
-
-        def notify_from_stream(
-            result, stream_name, stream_key, room=None, user=None
-        ):
-            stream = result.get(stream_name)
-            if stream:
-                position_index = stream["field_names"].index("position")
-                if room:
-                    room_index = stream["field_names"].index(room)
-                if user:
-                    user_index = stream["field_names"].index(user)
-
-                users = ()
-                rooms = ()
-                for row in stream["rows"]:
-                    position = row[position_index]
-
-                    if user:
-                        users = (row[user_index],)
-
-                    if room:
-                        rooms = (row[room_index],)
-
-                    notifier.on_new_event(
-                        stream_key, position, users=users, rooms=rooms
-                    )
-
-        def notify(result):
-            stream = result.get("events")
-            if stream:
-                max_position = stream["position"]
-                for row in stream["rows"]:
-                    position = row[0]
-                    internal = json.loads(row[1])
-                    event_json = json.loads(row[2])
-                    event = FrozenEvent(event_json, internal_metadata_dict=internal)
-                    extra_users = ()
-                    if event.type == EventTypes.Member:
-                        extra_users = (event.state_key,)
-                    notifier.on_new_room_event(
-                        event, position, max_position, extra_users
-                    )
-
-            notify_from_stream(
-                result, "push_rules", "push_rules_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "user_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "room_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "tag_account_data", "account_data_key", user="user_id"
-            )
-            notify_from_stream(
-                result, "receipts", "receipt_key", room="room_id"
-            )
-            notify_from_stream(
-                result, "typing", "typing_key", room="room_id"
-            )
-
-        next_expire_broken_caches_ms = 0
-        while True:
-            try:
-                args = store.stream_positions()
-                args.update(typing_handler.stream_positions())
-                args["timeout"] = 30000
-                result = yield http_client.get_json(replication_url, args=args)
-                now_ms = clock.time_msec()
-                if now_ms > next_expire_broken_caches_ms:
-                    expire_broken_caches()
-                    next_expire_broken_caches_ms = (
-                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
-                    )
-                yield store.process_replication(result)
-                typing_handler.process_replication(result)
-                presence_handler.process_replication(result)
-                notify(result)
-            except:
-                logger.exception("Error replicating from %r", replication_url)
-                yield sleep(5)
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return SyncReplicationHandler(self)

     def build_presence_handler(self):
         return SynchrotronPresence(self)

@@ -405,6 +334,83 @@ class SynchrotronServer(HomeServer):
         return SynchrotronTyping(self)


+class SyncReplicationHandler(ReplicationClientHandler):
+    def __init__(self, hs):
+        super(SyncReplicationHandler, self).__init__(hs.get_datastore())
+
+        self.store = hs.get_datastore()
+        self.typing_handler = hs.get_typing_handler()
+        self.presence_handler = hs.get_presence_handler()
+        self.notifier = hs.get_notifier()
+
+        self.presence_handler.sync_callback = self.send_user_sync
+
+    def on_rdata(self, stream_name, token, rows):
+        super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
+
+        preserve_fn(self.process_and_notify)(stream_name, token, rows)
+
+    def get_streams_to_replicate(self):
+        args = super(SyncReplicationHandler, self).get_streams_to_replicate()
+        args.update(self.typing_handler.stream_positions())
+        return args
+
+    def get_currently_syncing_users(self):
+        return self.presence_handler.get_currently_syncing_users()
+
+    @defer.inlineCallbacks
+    def process_and_notify(self, stream_name, token, rows):
+        if stream_name == "events":
+            # We shouldn't get multiple rows per token for events stream, so
+            # we don't need to optimise this for multiple rows.
+            for row in rows:
+                event = yield self.store.get_event(row.event_id)
+                extra_users = ()
+                if event.type == EventTypes.Member:
+                    extra_users = (event.state_key,)
+                max_token = self.store.get_room_max_stream_ordering()
+                self.notifier.on_new_room_event(
+                    event, token, max_token, extra_users
+                )
+        elif stream_name == "push_rules":
+            self.notifier.on_new_event(
+                "push_rules_key", token, users=[row.user_id for row in rows],
+            )
+        elif stream_name in ("account_data", "tag_account_data",):
+            self.notifier.on_new_event(
+                "account_data_key", token, users=[row.user_id for row in rows],
+            )
+        elif stream_name == "receipts":
+            self.notifier.on_new_event(
+                "receipt_key", token, rooms=[row.room_id for row in rows],
+            )
+        elif stream_name == "typing":
+            self.typing_handler.process_replication_rows(token, rows)
+            self.notifier.on_new_event(
+                "typing_key", token, rooms=[row.room_id for row in rows],
+            )
+        elif stream_name == "to_device":
+            entities = [row.entity for row in rows if row.entity.startswith("@")]
+            if entities:
+                self.notifier.on_new_event(
+                    "to_device_key", token, users=entities,
+                )
+        elif stream_name == "device_lists":
+            all_room_ids = set()
+            for row in rows:
+                room_ids = yield self.store.get_rooms_for_user(row.user_id)
+                all_room_ids.update(room_ids)
+            self.notifier.on_new_event(
+                "device_list_key", token, rooms=all_room_ids,
+            )
+        elif stream_name == "presence":
+            yield self.presence_handler.process_replication_rows(token, rows)
+        elif stream_name == "receipts":
+            self.notifier.on_new_event(
+                "groups_key", token, users=[row.user_id for row in rows],
+            )
+
+
 def start(config_options):
     try:
         config = HomeServerConfig.load_config(

@@ -416,7 +422,9 @@ def start(config_options):

     assert config.worker_app == "synapse.app.synchrotron"

-    setup_logging(config.worker_log_config, config.worker_log_file)
+    setup_logging(config, use_worker_options=True)
+
+    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

     database_engine = create_engine(config.database_config)

@@ -424,7 +432,7 @@ def start(config_options):
         config.server_name,
         db_config=config.database_config,
         config=config,
-        version_string=get_version_string("Synapse", synapse),
+        version_string="Synapse/" + get_version_string(synapse),
         database_engine=database_engine,
         application_service_handler=SynchrotronApplicationService(),
     )

@@ -432,32 +440,13 @@ def start(config_options):
     ss.setup()
     ss.start_listening(config.worker_listeners)

-    def run():
-        with LoggingContext("run"):
-            logger.info("Running")
-            change_resource_limit(config.soft_file_limit)
-            if config.gc_thresholds:
-                gc.set_threshold(*config.gc_thresholds)
-            reactor.run()
-
     def start():
         ss.get_datastore().start_profiling()
-        ss.replicate()
+        ss.get_state_handler().start_caching()

     reactor.callWhenRunning(start)

-    if config.worker_daemonize:
-        daemon = Daemonize(
-            app="synapse-synchrotron",
-            pid=config.worker_pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+    _base.start_worker_reactor("synapse-synchrotron", config)


 if __name__ == '__main__':
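A change that recurs through all of the worker files above is the listener configuration: "bind_addresses" is now a required list and the server listens once per address instead of on a single optional "bind_address". A small stand-alone sketch of that loop, with a plain function standing in for reactor.listenTCP:

def listen_all(listener_config, listen):
    # Listen on every configured address for the one port, as the hunks above now do.
    port = listener_config["port"]
    for address in listener_config["bind_addresses"]:
        listen(port, interface=address)

def fake_listen(port, interface):
    print("listening on %s:%d" % (interface, port))

listen_all({"port": 8083, "bind_addresses": ["127.0.0.1", "::1"]}, fake_listen)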
@@ -23,14 +23,27 @@ import signal
 import subprocess
 import sys
 import yaml
+import errno
+import time

-SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
+SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]

 GREEN = "\x1b[1;32m"
+YELLOW = "\x1b[1;33m"
 RED = "\x1b[1;31m"
 NORMAL = "\x1b[m"


+def pid_running(pid):
+    try:
+        os.kill(pid, 0)
+        return True
+    except OSError, err:
+        if err.errno == errno.EPERM:
+            return True
+        return False
+
+
 def write(message, colour=NORMAL, stream=sys.stdout):
     if colour == NORMAL:
         stream.write(message + "\n")

@@ -38,6 +51,11 @@ def write(message, colour=NORMAL, stream=sys.stdout):
         stream.write(colour + message + NORMAL + "\n")


+def abort(message, colour=RED, stream=sys.stderr):
+    write(message, colour, stream)
+    sys.exit(1)
+
+
 def start(configfile):
     write("Starting ...")
     args = SYNAPSE

@@ -45,7 +63,8 @@ def start(configfile):

     try:
         subprocess.check_call(args)
-        write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
+        write("started synapse.app.homeserver(%r)" %
+              (configfile,), colour=GREEN)
     except subprocess.CalledProcessError as e:
         write(
             "error starting (exit code: %d); see above for logs" % e.returncode,

@@ -76,8 +95,16 @@ def start_worker(app, configfile, worker_configfile):
 def stop(pidfile, app):
     if os.path.exists(pidfile):
         pid = int(open(pidfile).read())
+        try:
             os.kill(pid, signal.SIGTERM)
             write("stopped %s" % (app,), colour=GREEN)
+        except OSError, err:
+            if err.errno == errno.ESRCH:
+                write("%s not running" % (app,), colour=YELLOW)
+            elif err.errno == errno.EPERM:
+                abort("Cannot stop %s: Operation not permitted" % (app,))
+            else:
+                abort("Cannot stop %s: Unknown error" % (app,))


 Worker = collections.namedtuple("Worker", [

@@ -98,7 +125,7 @@ def main():
         "configfile",
         nargs="?",
         default="homeserver.yaml",
-        help="the homeserver config file, defaults to homserver.yaml",
+        help="the homeserver config file, defaults to homeserver.yaml",
     )
     parser.add_argument(
         "-w", "--worker",

@@ -175,7 +202,8 @@ def main():
             worker_app = worker_config["worker_app"]
             worker_pidfile = worker_config["worker_pid_file"]
             worker_daemonize = worker_config["worker_daemonize"]
-            assert worker_daemonize  # TODO print something more user friendly
+            assert worker_daemonize, "In config %r: expected '%s' to be True" % (
+                worker_configfile, "worker_daemonize")
             worker_cache_factor = worker_config.get("synctl_cache_factor")
             workers.append(Worker(
                 worker_app, worker_configfile, worker_pidfile, worker_cache_factor,

@@ -190,10 +218,25 @@ def main():
         if start_stop_synapse:
             stop(pidfile, "synapse.app.homeserver")

-        # TODO: Wait for synapse to actually shutdown before starting it again
+    # Wait for synapse to actually shutdown before starting it again
+    if action == "restart":
+        running_pids = []
+        if start_stop_synapse and os.path.exists(pidfile):
+            running_pids.append(int(open(pidfile).read()))
+        for worker in workers:
+            if os.path.exists(worker.pidfile):
+                running_pids.append(int(open(worker.pidfile).read()))
+        if len(running_pids) > 0:
+            write("Waiting for process to exit before restarting...")
+            for running_pid in running_pids:
+                while pid_running(running_pid):
+                    time.sleep(0.2)

     if action == "start" or action == "restart":
         if start_stop_synapse:
+            # Check if synapse is already running
+            if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
+                abort("synapse.app.homeserver already running")
             start(configfile)

         for worker in workers:
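The synctl hunk above checks whether a pid is alive by sending signal 0 and uses Python 2 except-clause syntax ("except OSError, err"). A self-contained sketch of the same check in the portable "except ... as" form, under the assumption that EPERM means the process exists but cannot be signalled:

import errno
import os

def pid_running(pid):
    try:
        os.kill(pid, 0)  # signal 0 performs error checking only, it does not kill
        return True
    except OSError as err:
        # EPERM: the process exists but we lack permission to signal it.
        return err.errno == errno.EPERM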
synapse/app/user_dir.py (new file, 241 lines)
@@ -0,0 +1,241 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2017 Vector Creations Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import synapse
|
||||||
|
from synapse import events
|
||||||
|
from synapse.app import _base
|
||||||
|
from synapse.config._base import ConfigError
|
||||||
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
|
from synapse.config.logger import setup_logging
|
||||||
|
from synapse.crypto import context_factory
|
||||||
|
from synapse.http.server import JsonResource
|
||||||
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
|
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||||
|
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||||
|
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
|
||||||
|
from synapse.replication.tcp.client import ReplicationClientHandler
|
||||||
|
from synapse.rest.client.v2_alpha import user_directory
|
||||||
|
from synapse.server import HomeServer
|
||||||
|
from synapse.storage.engines import create_engine
|
||||||
|
from synapse.storage.user_directory import UserDirectoryStore
|
||||||
|
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||||
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
|
from synapse.util.logcontext import LoggingContext, preserve_fn
|
||||||
|
from synapse.util.manhole import manhole
|
||||||
|
from synapse.util.versionstring import get_version_string
|
||||||
|
from twisted.internet import reactor
|
||||||
|
from twisted.web.resource import Resource
|
||||||
|
|
||||||
|
logger = logging.getLogger("synapse.app.user_dir")
|
||||||
|
|
||||||
|
|
||||||
|
class UserDirectorySlaveStore(
|
||||||
|
SlavedEventStore,
|
||||||
|
SlavedApplicationServiceStore,
|
||||||
|
SlavedRegistrationStore,
|
||||||
|
SlavedClientIpStore,
|
||||||
|
UserDirectoryStore,
|
||||||
|
BaseSlavedStore,
|
||||||
|
):
|
||||||
|
def __init__(self, db_conn, hs):
|
||||||
|
super(UserDirectorySlaveStore, self).__init__(db_conn, hs)
|
||||||
|
|
||||||
|
events_max = self._stream_id_gen.get_current_token()
|
||||||
|
curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
|
||||||
|
db_conn, "current_state_delta_stream",
|
||||||
|
entity_column="room_id",
|
||||||
|
stream_column="stream_id",
|
||||||
|
max_value=events_max, # As we share the stream id with events token
|
||||||
|
limit=1000,
|
||||||
|
)
|
||||||
|
self._curr_state_delta_stream_cache = StreamChangeCache(
|
||||||
|
"_curr_state_delta_stream_cache", min_curr_state_delta_id,
|
||||||
|
prefilled_cache=curr_state_delta_prefill,
|
||||||
|
)
|
||||||
|
|
||||||
|
self._current_state_delta_pos = events_max
|
||||||
|
|
||||||
|
def stream_positions(self):
|
||||||
|
result = super(UserDirectorySlaveStore, self).stream_positions()
|
||||||
|
result["current_state_deltas"] = self._current_state_delta_pos
|
||||||
|
return result
|
||||||
|
|
||||||
|
def process_replication_rows(self, stream_name, token, rows):
|
||||||
|
if stream_name == "current_state_deltas":
|
||||||
|
self._current_state_delta_pos = token
|
||||||
|
for row in rows:
|
||||||
|
self._curr_state_delta_stream_cache.entity_has_changed(
|
||||||
|
row.room_id, token
|
||||||
|
)
|
||||||
|
return super(UserDirectorySlaveStore, self).process_replication_rows(
|
||||||
|
stream_name, token, rows
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class UserDirectoryServer(HomeServer):
|
||||||
|
def get_db_conn(self, run_new_connection=True):
|
||||||
|
# Any param beginning with cp_ is a parameter for adbapi, and should
|
||||||
|
# not be passed to the database engine.
|
||||||
|
db_params = {
|
||||||
|
k: v for k, v in self.db_config.get("args", {}).items()
|
||||||
|
if not k.startswith("cp_")
|
||||||
|
}
|
||||||
|
db_conn = self.database_engine.module.connect(**db_params)
|
||||||
|
|
||||||
|
if run_new_connection:
|
||||||
|
self.database_engine.on_new_connection(db_conn)
|
||||||
|
return db_conn
|
||||||
|
|
||||||
|
def setup(self):
|
||||||
|
logger.info("Setting up.")
|
||||||
|
self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
|
||||||
|
logger.info("Finished setting up.")
|
||||||
|
|
||||||
|
def _listen_http(self, listener_config):
|
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    user_directory.register_servlets(self, resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse user_dir now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

        self.get_tcp_replication().start_replication(self)

    def build_tcp_replication(self):
        return UserDirectoryReplicationHandler(self)


class UserDirectoryReplicationHandler(ReplicationClientHandler):
    def __init__(self, hs):
        super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore())
        self.user_directory = hs.get_user_directory_handler()

    def on_rdata(self, stream_name, token, rows):
        super(UserDirectoryReplicationHandler, self).on_rdata(
            stream_name, token, rows
        )
        if stream_name == "current_state_deltas":
            preserve_fn(self.user_directory.notify_new_event)()


def start(config_options):
    try:
        config = HomeServerConfig.load_config(
            "Synapse user directory", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.user_dir"

    setup_logging(config, use_worker_options=True)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    if config.update_user_directory:
        sys.stderr.write(
            "\nThe update_user_directory option must be disabled in the main synapse process"
            "\nbefore it can be run in a separate worker."
            "\nPlease add ``update_user_directory: false`` to the main config"
            "\n"
        )
        sys.exit(1)

    # Force the user directory updates to run in this worker, since they are
    # disabled in the main config
    config.update_user_directory = True

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ps = UserDirectoryServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening(config.worker_listeners)

    def start():
        ps.get_datastore().start_profiling()
        ps.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    _base.start_worker_reactor("synapse-user-dir", config)


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
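For context, the listener dict consumed by _listen_http above comes from the worker's listener configuration; a minimal sketch of its shape (the port, address and tag are invented for illustration, not taken from this changeset):

    # Hypothetical listener definition matching the keys _listen_http reads.
    listener_config = {
        "type": "http",
        "port": 8086,
        "bind_addresses": ["127.0.0.1"],
        "tag": "client",
        "resources": [
            {"names": ["client"]},
        ],
    }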
@@ -13,6 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.api.constants import EventTypes
+from synapse.util.caches.descriptors import cachedInlineCallbacks
+
+from twisted.internet import defer

 import logging
 import re
@@ -79,7 +82,7 @@ class ApplicationService(object):
     NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

     def __init__(self, token, url=None, namespaces=None, hs_token=None,
-                 sender=None, id=None):
+                 sender=None, id=None, protocols=None, rate_limited=True):
         self.token = token
         self.url = url
         self.hs_token = hs_token
@@ -87,6 +90,17 @@ class ApplicationService(object):
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id

+        if "|" in self.id:
+            raise Exception("application service ID cannot contain '|' character")
+
+        # .protocols is a publicly visible field
+        if protocols:
+            self.protocols = set(protocols)
+        else:
+            self.protocols = set()
+
+        self.rate_limited = rate_limited
+
     def _check_namespaces(self, namespaces):
         # Sanity check that it is of the form:
         # {
@@ -111,92 +125,94 @@ class ApplicationService(object):
                     raise ValueError(
                         "Expected bool for 'exclusive' in ns '%s'" % ns
                     )
-                if not isinstance(regex_obj.get("regex"), basestring):
+                regex = regex_obj.get("regex")
+                if isinstance(regex, basestring):
+                    regex_obj["regex"] = re.compile(regex)  # Pre-compile regex
+                else:
                     raise ValueError(
                         "Expected string for 'regex' in ns '%s'" % ns
                     )
         return namespaces

-    def _matches_regex(self, test_string, namespace_key, return_obj=False):
-        if not isinstance(test_string, basestring):
-            logger.error(
-                "Expected a string to test regex against, but got %s",
-                test_string
-            )
-            return False
-
+    def _matches_regex(self, test_string, namespace_key):
         for regex_obj in self.namespaces[namespace_key]:
-            if re.match(regex_obj["regex"], test_string):
-                if return_obj:
-                    return regex_obj
-                return True
-        return False
+            if regex_obj["regex"].match(test_string):
+                return regex_obj
+        return None

     def _is_exclusive(self, ns_key, test_string):
-        regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
+        regex_obj = self._matches_regex(test_string, ns_key)
         if regex_obj:
             return regex_obj["exclusive"]
         return False

-    def _matches_user(self, event, member_list):
-        if (hasattr(event, "sender") and
-                self.is_interested_in_user(event.sender)):
-            return True
+    @defer.inlineCallbacks
+    def _matches_user(self, event, store):
+        if not event:
+            defer.returnValue(False)
+
+        if self.is_interested_in_user(event.sender):
+            defer.returnValue(True)
         # also check m.room.member state key
-        if (hasattr(event, "type") and event.type == EventTypes.Member
-                and hasattr(event, "state_key")
-                and self.is_interested_in_user(event.state_key)):
-            return True
+        if (event.type == EventTypes.Member and
+                self.is_interested_in_user(event.state_key)):
+            defer.returnValue(True)
+
+        if not store:
+            defer.returnValue(False)
+
+        does_match = yield self._matches_user_in_member_list(event.room_id, store)
+        defer.returnValue(does_match)
+
+    @cachedInlineCallbacks(num_args=1, cache_context=True)
+    def _matches_user_in_member_list(self, room_id, store, cache_context):
+        member_list = yield store.get_users_in_room(
+            room_id, on_invalidate=cache_context.invalidate
+        )
+
         # check joined member events
         for user_id in member_list:
             if self.is_interested_in_user(user_id):
-                return True
-        return False
+                defer.returnValue(True)
+        defer.returnValue(False)

     def _matches_room_id(self, event):
         if hasattr(event, "room_id"):
             return self.is_interested_in_room(event.room_id)
         return False

-    def _matches_aliases(self, event, alias_list):
+    @defer.inlineCallbacks
+    def _matches_aliases(self, event, store):
+        if not store or not event:
+            defer.returnValue(False)
+
+        alias_list = yield store.get_aliases_for_room(event.room_id)
         for alias in alias_list:
             if self.is_interested_in_alias(alias):
-                return True
-        return False
+                defer.returnValue(True)
+        defer.returnValue(False)

-    def is_interested(self, event, restrict_to=None, aliases_for_event=None,
-                      member_list=None):
+    @defer.inlineCallbacks
+    def is_interested(self, event, store=None):
         """Check if this service is interested in this event.

         Args:
             event(Event): The event to check.
-            restrict_to(str): The namespace to restrict regex tests to.
-            aliases_for_event(list): A list of all the known room aliases for
-                this event.
-            member_list(list): A list of all joined user_ids in this room.
+            store(DataStore)
         Returns:
             bool: True if this service would like to know about this event.
         """
-        if aliases_for_event is None:
-            aliases_for_event = []
-        if member_list is None:
-            member_list = []
-
-        if restrict_to and restrict_to not in ApplicationService.NS_LIST:
-            # this is a programming error, so fail early and raise a general
-            # exception
-            raise Exception("Unexpected restrict_to value: %s". restrict_to)
-
-        if not restrict_to:
-            return (self._matches_user(event, member_list)
-                    or self._matches_aliases(event, aliases_for_event)
-                    or self._matches_room_id(event))
-        elif restrict_to == ApplicationService.NS_ALIASES:
-            return self._matches_aliases(event, aliases_for_event)
-        elif restrict_to == ApplicationService.NS_ROOMS:
-            return self._matches_room_id(event)
-        elif restrict_to == ApplicationService.NS_USERS:
-            return self._matches_user(event, member_list)
+        # Do cheap checks first
+        if self._matches_room_id(event):
+            defer.returnValue(True)
+
+        if (yield self._matches_aliases(event, store)):
+            defer.returnValue(True)
+
+        if (yield self._matches_user(event, store)):
+            defer.returnValue(True)
+
+        defer.returnValue(False)

     def is_interested_in_user(self, user_id):
         return (
@@ -205,10 +221,10 @@ class ApplicationService(object):
         )

     def is_interested_in_alias(self, alias):
-        return self._matches_regex(alias, ApplicationService.NS_ALIASES)
+        return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES))

     def is_interested_in_room(self, room_id):
-        return self._matches_regex(room_id, ApplicationService.NS_ROOMS)
+        return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS))

     def is_exclusive_user(self, user_id):
         return (
@@ -216,11 +232,27 @@ class ApplicationService(object):
             or user_id == self.sender
         )

+    def is_interested_in_protocol(self, protocol):
+        return protocol in self.protocols
+
     def is_exclusive_alias(self, alias):
         return self._is_exclusive(ApplicationService.NS_ALIASES, alias)

     def is_exclusive_room(self, room_id):
         return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

+    def get_exlusive_user_regexes(self):
+        """Get the list of regexes used to determine if a user is exclusively
+        registered by the AS
+        """
+        return [
+            regex_obj["regex"]
+            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
+            if regex_obj["exclusive"]
+        ]
+
+    def is_rate_limited(self):
+        return self.rate_limited
+
     def __str__(self):
         return "ApplicationService: %s" % (self.__dict__,)
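A minimal standalone sketch of the pre-compiled namespace-regex matching introduced above (the namespace data and user ID are invented for illustration):

    import re

    # Namespace entries now carry a compiled pattern, as _check_namespaces stores them.
    namespaces = {
        "users": [
            {"regex": re.compile(r"@irc_.*:example\.org"), "exclusive": True},
        ],
    }

    def matches_regex(test_string, ns_key):
        # Mirrors the new _matches_regex: return the matching entry, else None.
        for regex_obj in namespaces[ns_key]:
            if regex_obj["regex"].match(test_string):
                return regex_obj
        return None

    print(bool(matches_regex("@irc_alice:example.org", "users")))  # True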
@@ -14,9 +14,12 @@
 # limitations under the License.
 from twisted.internet import defer

+from synapse.api.constants import ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException
 from synapse.http.client import SimpleHttpClient
 from synapse.events.utils import serialize_event
+from synapse.util.caches.response_cache import ResponseCache
+from synapse.types import ThirdPartyInstanceID

 import logging
 import urllib
@@ -24,6 +27,42 @@ import urllib
 logger = logging.getLogger(__name__)


+HOUR_IN_MS = 60 * 60 * 1000
+
+
+APP_SERVICE_PREFIX = "/_matrix/app/unstable"
+
+
+def _is_valid_3pe_metadata(info):
+    if "instances" not in info:
+        return False
+    if not isinstance(info["instances"], list):
+        return False
+    return True
+
+
+def _is_valid_3pe_result(r, field):
+    if not isinstance(r, dict):
+        return False
+
+    for k in (field, "protocol"):
+        if k not in r:
+            return False
+        if not isinstance(r[k], str):
+            return False
+
+    if "fields" not in r:
+        return False
+    fields = r["fields"]
+    if not isinstance(fields, dict):
+        return False
+    for k in fields.keys():
+        if not isinstance(fields[k], str):
+            return False
+
+    return True
+
+
 class ApplicationServiceApi(SimpleHttpClient):
     """This class manages HS -> AS communications, including querying and
     pushing.
@@ -33,8 +72,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         super(ApplicationServiceApi, self).__init__(hs)
         self.clock = hs.get_clock()

+        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
+
     @defer.inlineCallbacks
     def query_user(self, service, user_id):
+        if service.url is None:
+            defer.returnValue(False)
         uri = service.url + ("/users/%s" % urllib.quote(user_id))
         response = None
         try:
@@ -54,6 +97,8 @@ class ApplicationServiceApi(SimpleHttpClient):

     @defer.inlineCallbacks
     def query_alias(self, service, alias):
+        if service.url is None:
+            defer.returnValue(False)
         uri = service.url + ("/rooms/%s" % urllib.quote(alias))
         response = None
         try:
@@ -71,8 +116,91 @@ class ApplicationServiceApi(SimpleHttpClient):
             logger.warning("query_alias to %s threw exception %s", uri, ex)
             defer.returnValue(False)

+    @defer.inlineCallbacks
+    def query_3pe(self, service, kind, protocol, fields):
+        if kind == ThirdPartyEntityKind.USER:
+            required_field = "userid"
+        elif kind == ThirdPartyEntityKind.LOCATION:
+            required_field = "alias"
+        else:
+            raise ValueError(
+                "Unrecognised 'kind' argument %r to query_3pe()", kind
+            )
+        if service.url is None:
+            defer.returnValue([])
+
+        uri = "%s%s/thirdparty/%s/%s" % (
+            service.url,
+            APP_SERVICE_PREFIX,
+            kind,
+            urllib.quote(protocol)
+        )
+        try:
+            response = yield self.get_json(uri, fields)
+            if not isinstance(response, list):
+                logger.warning(
+                    "query_3pe to %s returned an invalid response %r",
+                    uri, response
+                )
+                defer.returnValue([])
+
+            ret = []
+            for r in response:
+                if _is_valid_3pe_result(r, field=required_field):
+                    ret.append(r)
+                else:
+                    logger.warning(
+                        "query_3pe to %s returned an invalid result %r",
+                        uri, r
+                    )
+
+            defer.returnValue(ret)
+        except Exception as ex:
+            logger.warning("query_3pe to %s threw exception %s", uri, ex)
+            defer.returnValue([])
+
+    def get_3pe_protocol(self, service, protocol):
+        if service.url is None:
+            defer.returnValue({})
+
+        @defer.inlineCallbacks
+        def _get():
+            uri = "%s%s/thirdparty/protocol/%s" % (
+                service.url,
+                APP_SERVICE_PREFIX,
+                urllib.quote(protocol)
+            )
+            try:
+                info = yield self.get_json(uri, {})
+
+                if not _is_valid_3pe_metadata(info):
+                    logger.warning("query_3pe_protocol to %s did not return a"
+                                   " valid result", uri)
+                    defer.returnValue(None)
+
+                for instance in info.get("instances", []):
+                    network_id = instance.get("network_id", None)
+                    if network_id is not None:
+                        instance["instance_id"] = ThirdPartyInstanceID(
+                            service.id, network_id,
+                        ).to_string()
+
+                defer.returnValue(info)
+            except Exception as ex:
+                logger.warning("query_3pe_protocol to %s threw exception %s",
+                               uri, ex)
+                defer.returnValue(None)
+
+        key = (service.id, protocol)
+        return self.protocol_meta_cache.get(key) or (
+            self.protocol_meta_cache.set(key, _get())
+        )
+
     @defer.inlineCallbacks
     def push_bulk(self, service, events, txn_id=None):
+        if service.url is None:
+            defer.returnValue(True)
+
         events = self._serialize(events)

         if txn_id is None:
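The get_3pe_protocol change above caches protocol metadata with a get-or-set pattern on the response cache; a simplified sketch of that pattern (a plain dict stands in for Synapse's ResponseCache, and expiry is ignored):

    # Illustrative cache-or-compute helper; not the real ResponseCache.
    _cache = {}

    def get_or_compute(key, compute):
        # Reuse a previously stored result if present, otherwise compute it
        # once and remember it under the key.
        if key in _cache:
            return _cache[key]
        result = compute()
        _cache[key] = result
        return result

    info = get_or_compute(("service-id", "irc"), lambda: {"instances": []})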
@@ -48,9 +48,12 @@ UP & quit           +---------- YES                       SUCCESS
 This is all tied together by the AppServiceScheduler which DIs the required
 components.
 """
+from twisted.internet import defer
+
 from synapse.appservice import ApplicationServiceState
-from twisted.internet import defer
+from synapse.util.logcontext import preserve_fn
+from synapse.util.metrics import Measure

 import logging

 logger = logging.getLogger(__name__)
@@ -73,7 +76,7 @@ class ApplicationServiceScheduler(object):
         self.txn_ctrl = _TransactionController(
             self.clock, self.store, self.as_api, create_recoverer
         )
-        self.queuer = _ServiceQueuer(self.txn_ctrl)
+        self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)

     @defer.inlineCallbacks
     def start(self):
@@ -94,38 +97,36 @@ class _ServiceQueuer(object):
     this schedules any other events in the queue to run.
     """

-    def __init__(self, txn_ctrl):
+    def __init__(self, txn_ctrl, clock):
         self.queued_events = {}  # dict of {service_id: [events]}
-        self.pending_requests = {}  # dict of {service_id: Deferred}
+        self.requests_in_flight = set()
         self.txn_ctrl = txn_ctrl
+        self.clock = clock

     def enqueue(self, service, event):
         # if this service isn't being sent something
-        if not self.pending_requests.get(service.id):
-            self._send_request(service, [event])
-        else:
-            # add to queue for this service
-            if service.id not in self.queued_events:
-                self.queued_events[service.id] = []
-            self.queued_events[service.id].append(event)
+        self.queued_events.setdefault(service.id, []).append(event)
+        preserve_fn(self._send_request)(service)

-    def _send_request(self, service, events):
-        # send request and add callbacks
-        d = self.txn_ctrl.send(service, events)
-        d.addBoth(self._on_request_finish)
-        d.addErrback(self._on_request_fail)
-        self.pending_requests[service.id] = d
+    @defer.inlineCallbacks
+    def _send_request(self, service):
+        if service.id in self.requests_in_flight:
+            return

-    def _on_request_finish(self, service):
-        self.pending_requests[service.id] = None
-        # if there are queued events, then send them.
-        if (service.id in self.queued_events
-                and len(self.queued_events[service.id]) > 0):
-            self._send_request(service, self.queued_events[service.id])
-            self.queued_events[service.id] = []
-
-    def _on_request_fail(self, err):
-        logger.error("AS request failed: %s", err)
+        self.requests_in_flight.add(service.id)
+        try:
+            while True:
+                events = self.queued_events.pop(service.id, [])
+                if not events:
+                    return
+
+                with Measure(self.clock, "servicequeuer.send"):
+                    try:
+                        yield self.txn_ctrl.send(service, events)
+                    except Exception:
+                        logger.exception("AS request failed")
+        finally:
+            self.requests_in_flight.discard(service.id)


 class _TransactionController(object):
@@ -149,14 +150,12 @@ class _TransactionController(object):
             if service_is_up:
                 sent = yield txn.send(self.as_api)
                 if sent:
-                    txn.complete(self.store)
+                    yield txn.complete(self.store)
                 else:
-                    self._start_recoverer(service)
+                    preserve_fn(self._start_recoverer)(service)
         except Exception as e:
             logger.exception(e)
-            self._start_recoverer(service)
-        # request has finished
-        defer.returnValue(service)
+            preserve_fn(self._start_recoverer)(service)

     @defer.inlineCallbacks
     def on_recovered(self, recoverer):
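A simplified, synchronous sketch of the queue-drain pattern the rewritten _ServiceQueuer uses above: one in-flight request per service, draining that service's queue until it is empty (the names and the send stub are invented for the example):

    queued_events = {}
    requests_in_flight = set()

    def send(service_id, events):
        print("sending %d events for %s" % (len(events), service_id))

    def enqueue(service_id, event):
        queued_events.setdefault(service_id, []).append(event)
        _send_request(service_id)

    def _send_request(service_id):
        if service_id in requests_in_flight:
            return  # an earlier call is already draining this service's queue
        requests_in_flight.add(service_id)
        try:
            while True:
                events = queued_events.pop(service_id, [])
                if not events:
                    return
                send(service_id, events)
        finally:
            requests_in_flight.discard(service_id)

    enqueue("irc-bridge", {"type": "m.room.message"})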
@@ -64,11 +64,12 @@ class Config(object):
         if isinstance(value, int) or isinstance(value, long):
             return value
         second = 1000
-        hour = 60 * 60 * second
+        minute = 60 * second
+        hour = 60 * minute
         day = 24 * hour
         week = 7 * day
         year = 365 * day
-        sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
+        sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
         size = 1
         suffix = value[-1]
         if suffix in sizes:
@@ -80,22 +81,38 @@ class Config(object):
     def abspath(file_path):
         return os.path.abspath(file_path) if file_path else file_path

+    @classmethod
+    def path_exists(cls, file_path):
+        """Check if a file exists
+
+        Unlike os.path.exists, this throws an exception if there is an error
+        checking if the file exists (for example, if there is a perms error on
+        the parent dir).
+
+        Returns:
+            bool: True if the file exists; False if not.
+        """
+        try:
+            os.stat(file_path)
+            return True
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise e
+            return False
+
     @classmethod
     def check_file(cls, file_path, config_name):
         if file_path is None:
             raise ConfigError(
                 "Missing config for %s."
-                " You must specify a path for the config file. You can "
-                "do this with the -c or --config-path option. "
-                "Adding --generate-config along with --server-name "
-                "<server name> will generate a config file at the given path."
                 % (config_name,)
             )
-        if not os.path.exists(file_path):
+        try:
+            os.stat(file_path)
+        except OSError as e:
             raise ConfigError(
-                "File %s config for %s doesn't exist."
-                " Try running again with --generate-config"
-                % (file_path, config_name,)
+                "Error accessing file '%s' (config for %s): %s"
+                % (file_path, config_name, e.strerror)
             )
         return cls.abspath(file_path)
@@ -247,7 +264,7 @@ class Config(object):
                 " -c CONFIG-FILE\""
             )
         (config_path,) = config_files
-        if not os.path.exists(config_path):
+        if not cls.path_exists(config_path):
             if config_args.keys_directory:
                 config_dir_path = config_args.keys_directory
             else:
@@ -260,7 +277,7 @@ class Config(object):
                     "Must specify a server_name to a generate config for."
                     " Pass -H server.name."
                 )
-            if not os.path.exists(config_dir_path):
+            if not cls.path_exists(config_dir_path):
                 os.makedirs(config_dir_path)
             with open(config_path, "wb") as config_file:
                 config_bytes, config = obj.generate_config(
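A self-contained sketch of the duration parsing that the new "m" (minute) suffix feeds into, paraphrasing the hunk above (the function name and example values are illustrative):

    # Returns a duration in milliseconds for strings such as "30s", "10m" or "1h".
    def parse_duration(value):
        if isinstance(value, int):
            return value
        second = 1000
        minute = 60 * second
        hour = 60 * minute
        day = 24 * hour
        week = 7 * day
        year = 365 * day
        sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
        size = 1
        suffix = value[-1]
        if suffix in sizes:
            value = value[:-1]
            size = sizes[suffix]
        return int(value) * size

    assert parse_duration("10m") == 10 * 60 * 1000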
@@ -28,6 +28,7 @@ class AppServiceConfig(Config):

     def read_config(self, config):
         self.app_service_config_files = config.get("app_service_config_files", [])
+        self.notify_appservices = config.get("notify_appservices", True)

     def default_config(cls, **kwargs):
         return """\
@@ -85,7 +86,7 @@ def load_appservices(hostname, config_files):

 def _load_appservice(hostname, as_info, config_filename):
     required_string_fields = [
-        "id", "url", "as_token", "hs_token", "sender_localpart"
+        "id", "as_token", "hs_token", "sender_localpart"
     ]
     for field in required_string_fields:
         if not isinstance(as_info.get(field), basestring):
@@ -93,6 +94,14 @@ def _load_appservice(hostname, as_info, config_filename):
                 field, config_filename,
             ))

+    # 'url' must either be a string or explicitly null, not missing
+    # to avoid accidentally turning off push for ASes.
+    if (not isinstance(as_info.get("url"), basestring) and
+            as_info.get("url", "") is not None):
+        raise KeyError(
+            "Required string field or explicit null: 'url' (%s)" % (config_filename,)
+        )
+
     localpart = as_info["sender_localpart"]
     if urllib.quote(localpart) != localpart:
         raise ValueError(
@@ -101,6 +110,11 @@ def _load_appservice(hostname, as_info, config_filename):
     user = UserID(localpart, hostname)
     user_id = user.to_string()

+    # Rate limiting for users of this AS is on by default (excludes sender)
+    rate_limited = True
+    if isinstance(as_info.get("rate_limited"), bool):
+        rate_limited = as_info.get("rate_limited")
+
     # namespace checks
     if not isinstance(as_info.get("namespaces"), dict):
         raise KeyError("Requires 'namespaces' object.")
@@ -122,6 +136,22 @@ def _load_appservice(hostname, as_info, config_filename):
             raise ValueError(
                 "Missing/bad type 'exclusive' key in %s", regex_obj
             )
+    # protocols check
+    protocols = as_info.get("protocols")
+    if protocols:
+        # Because strings are lists in python
+        if isinstance(protocols, str) or not isinstance(protocols, list):
+            raise KeyError("Optional 'protocols' must be a list if present.")
+        for p in protocols:
+            if not isinstance(p, str):
+                raise KeyError("Bad value for 'protocols' item")
+
+    if as_info["url"] is None:
+        logger.info(
+            "(%s) Explicitly empty 'url' provided. This application service"
+            " will not receive events or queries.",
+            config_filename,
+        )
     return ApplicationService(
         token=as_info["as_token"],
         url=as_info["url"],
@@ -129,4 +159,6 @@ def _load_appservice(hostname, as_info, config_filename):
         hs_token=as_info["hs_token"],
         sender=user_id,
         id=as_info["id"],
+        protocols=protocols,
+        rate_limited=rate_limited
     )
@@ -68,6 +68,18 @@ class EmailConfig(Config):
             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
             )
+            self.email_riot_base_url = email_config.get(
+                "riot_base_url", None
+            )
+            self.email_smtp_user = email_config.get(
+                "smtp_user", None
+            )
+            self.email_smtp_pass = email_config.get(
+                "smtp_pass", None
+            )
+            self.require_transport_security = email_config.get(
+                "require_transport_security", False
+            )
             if "app_name" in email_config:
                 self.email_app_name = email_config["app_name"]
             else:
@@ -85,14 +97,25 @@ class EmailConfig(Config):
     def default_config(self, config_dir_path, server_name, **kwargs):
         return """
         # Enable sending emails for notification events
+        # Defining a custom URL for Riot is only needed if email notifications
+        # should contain links to a self-hosted installation of Riot; when set
+        # the "app_name" setting is ignored.
+        #
+        # If your SMTP server requires authentication, the optional smtp_user &
+        # smtp_pass variables should be used
+        #
         #email:
         #   enable_notifs: false
         #   smtp_host: "localhost"
         #   smtp_port: 25
+        #   smtp_user: "exampleusername"
+        #   smtp_pass: "examplepassword"
+        #   require_transport_security: False
         #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
        #   app_name: Matrix
        #   template_dir: res/templates
        #   notif_template_html: notif_mail.html
        #   notif_template_text: notif_mail.txt
        #   notif_for_new_users: True
+        #   riot_base_url: "http://localhost/riot"
         """
synapse/config/groups.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class GroupsConfig(Config):
+    def read_config(self, config):
+        self.enable_group_creation = config.get("enable_group_creation", False)
+        self.group_creation_prefix = config.get("group_creation_prefix", "")
+
+    def default_config(self, **kwargs):
+        return """\
+        # Whether to allow non server admins to create groups on this server
+        enable_group_creation: false
+
+        # If enabled, non server admins can only create groups with local parts
+        # starting with this prefix
+        # group_creation_prefix: "unofficial/"
+        """
@@ -30,17 +30,21 @@ from .saml2 import SAML2Config
 from .cas import CasConfig
 from .password import PasswordConfig
 from .jwt import JWTConfig
-from .ldap import LDAPConfig
+from .password_auth_providers import PasswordAuthProviderConfig
 from .emailconfig import EmailConfig
 from .workers import WorkerConfig
+from .push import PushConfig
+from .spam_checker import SpamCheckerConfig
+from .groups import GroupsConfig


 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,
-                       WorkerConfig,):
+                       JWTConfig, PasswordConfig, EmailConfig,
+                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
+                       SpamCheckerConfig, GroupsConfig,):
     pass
@@ -118,10 +118,9 @@ class KeyConfig(Config):
         signing_keys = self.read_file(signing_key_path, "signing_key")
         try:
             return read_signing_keys(signing_keys.splitlines(True))
-        except Exception:
+        except Exception as e:
             raise ConfigError(
-                "Error reading signing_key."
-                " Try running again with --generate-config"
+                "Error reading signing_key: %s" % (str(e))
             )

     def read_old_signing_keys(self, old_signing_keys):
@@ -141,7 +140,8 @@ class KeyConfig(Config):

     def generate_files(self, config):
         signing_key_path = config["signing_key_path"]
-        if not os.path.exists(signing_key_path):
+
+        if not self.path_exists(signing_key_path):
             with open(signing_key_path, "w") as signing_key_file:
                 key_id = "a_" + random_string(4)
                 write_signing_keys(
@@ -1,100 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015 Niklas Riekenbrauck
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ._base import Config, ConfigError
-
-
-MISSING_LDAP3 = (
-    "Missing ldap3 library. This is required for LDAP Authentication."
-)
-
-
-class LDAPMode(object):
-    SIMPLE = "simple",
-    SEARCH = "search",
-
-    LIST = (SIMPLE, SEARCH)
-
-
-class LDAPConfig(Config):
-    def read_config(self, config):
-        ldap_config = config.get("ldap_config", {})
-
-        self.ldap_enabled = ldap_config.get("enabled", False)
-
-        if self.ldap_enabled:
-            # verify dependencies are available
-            try:
-                import ldap3
-                ldap3  # to stop unused lint
-            except ImportError:
-                raise ConfigError(MISSING_LDAP3)
-
-            self.ldap_mode = LDAPMode.SIMPLE
-
-            # verify config sanity
-            self.require_keys(ldap_config, [
-                "uri",
-                "base",
-                "attributes",
-            ])
-
-            self.ldap_uri = ldap_config["uri"]
-            self.ldap_start_tls = ldap_config.get("start_tls", False)
-            self.ldap_base = ldap_config["base"]
-            self.ldap_attributes = ldap_config["attributes"]
-
-            if "bind_dn" in ldap_config:
-                self.ldap_mode = LDAPMode.SEARCH
-                self.require_keys(ldap_config, [
-                    "bind_dn",
-                    "bind_password",
-                ])
-
-                self.ldap_bind_dn = ldap_config["bind_dn"]
-                self.ldap_bind_password = ldap_config["bind_password"]
-                self.ldap_filter = ldap_config.get("filter", None)
-
-            # verify attribute lookup
-            self.require_keys(ldap_config['attributes'], [
-                "uid",
-                "name",
-                "mail",
-            ])
-
-    def require_keys(self, config, required):
-        missing = [key for key in required if key not in config]
-        if missing:
-            raise ConfigError(
-                "LDAP enabled but missing required config values: {}".format(
-                    ", ".join(missing)
-                )
-            )
-
-    def default_config(self, **kwargs):
-        return """\
-        # ldap_config:
-        #   enabled: true
-        #   uri: "ldap://ldap.example.com:389"
-        #   start_tls: true
-        #   base: "ou=users,dc=example,dc=com"
-        #   attributes:
-        #      uid: "cn"
-        #      mail: "email"
-        #      name: "givenName"
-        #   #bind_dn:
-        #   #bind_password:
-        #   #filter: "(objectClass=posixAccount)"
-        """
@@ -15,14 +15,13 @@

 from ._base import Config
 from synapse.util.logcontext import LoggingContextFilter
-from twisted.python.log import PythonLoggingObserver
+from twisted.logger import globalLogBeginner, STDLibLogObserver
 import logging
 import logging.config
 import yaml
 from string import Template
 import os
 import signal
-from synapse.util.debug import debug_deferreds


 DEFAULT_LOG_CONFIG = Template("""
@@ -46,16 +45,18 @@ handlers:
         maxBytes: 104857600
         backupCount: 10
         filters: [context]
-        level: INFO
     console:
         class: logging.StreamHandler
         formatter: precise
+        filters: [context]

 loggers:
     synapse:
         level: INFO

     synapse.storage.SQL:
+        # beware: increasing this to DEBUG will make synapse log sensitive
+        # information such as access tokens.
         level: INFO

 root:
@@ -68,10 +69,9 @@ class LoggingConfig(Config):

     def read_config(self, config):
         self.verbosity = config.get("verbose", 0)
+        self.no_redirect_stdio = config.get("no_redirect_stdio", False)
         self.log_config = self.abspath(config.get("log_config"))
         self.log_file = self.abspath(config.get("log_file"))
-        if config.get("full_twisted_stacktraces"):
-            debug_deferreds()

     def default_config(self, config_dir_path, server_name, **kwargs):
         log_file = self.abspath("homeserver.log")
@@ -79,24 +79,21 @@ class LoggingConfig(Config):
             os.path.join(config_dir_path, server_name + ".log.config")
         )
         return """
-        # Logging verbosity level.
+        # Logging verbosity level. Ignored if log_config is specified.
         verbose: 0

-        # File to write logging to
+        # File to write logging to. Ignored if log_config is specified.
         log_file: "%(log_file)s"

         # A yaml python logging config file
         log_config: "%(log_config)s"
-
-        # Stop twisted from discarding the stack traces of exceptions in
-        # deferreds by waiting a reactor tick before running a deferred's
-        # callbacks.
-        # full_twisted_stacktraces: true
         """ % locals()

     def read_arguments(self, args):
         if args.verbose is not None:
             self.verbosity = args.verbose
+        if args.no_redirect_stdio is not None:
+            self.no_redirect_stdio = args.no_redirect_stdio
         if args.log_config is not None:
             self.log_config = args.log_config
         if args.log_file is not None:
@@ -106,16 +103,22 @@ class LoggingConfig(Config):
         logging_group = parser.add_argument_group("logging")
         logging_group.add_argument(
             '-v', '--verbose', dest="verbose", action='count',
-            help="The verbosity level."
+            help="The verbosity level. Specify multiple times to increase "
+            "verbosity. (Ignored if --log-config is specified.)"
         )
         logging_group.add_argument(
             '-f', '--log-file', dest="log_file",
-            help="File to log to."
+            help="File to log to. (Ignored if --log-config is specified.)"
         )
         logging_group.add_argument(
             '--log-config', dest="log_config", default=None,
             help="Python logging config file"
         )
+        logging_group.add_argument(
+            '-n', '--no-redirect-stdio',
+            action='store_true', default=None,
+            help="Do not redirect stdout/stderr to the log"
+        )

     def generate_files(self, config):
         log_config = config.get("log_config")
@@ -125,11 +128,22 @@ class LoggingConfig(Config):
                 DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
             )

-    def setup_logging(self):
-        setup_logging(self.log_config, self.log_file, self.verbosity)

+def setup_logging(config, use_worker_options=False):
+    """ Set up python logging
+
+    Args:
+        config (LoggingConfig | synapse.config.workers.WorkerConfig):
+            configuration data
+
+        use_worker_options (bool): True to use 'worker_log_config' and
+            'worker_log_file' options instead of 'log_config' and 'log_file'.
+    """
+    log_config = (config.worker_log_config if use_worker_options
+                  else config.log_config)
+    log_file = (config.worker_log_file if use_worker_options
+                else config.log_file)

-def setup_logging(log_config=None, log_file=None, verbosity=None):
     log_format = (
         "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
         " - %(message)s"
@@ -138,9 +152,9 @@ def setup_logging(log_config=None, log_file=None, verbosity=None):

     level = logging.INFO
     level_for_storage = logging.INFO
-    if verbosity:
+    if config.verbosity:
         level = logging.DEBUG
-        if verbosity > 1:
+        if config.verbosity > 1:
             level_for_storage = logging.DEBUG

     # FIXME: we need a logging.WARN for a -q quiet option
@@ -160,14 +174,6 @@ def setup_logging(log_config=None, log_file=None, verbosity=None):
             logger.info("Closing log file due to SIGHUP")
             handler.doRollover()
             logger.info("Opened new log file due to SIGHUP")
-
-        # TODO(paul): obviously this is a terrible mechanism for
-        #   stealing SIGHUP, because it means no other part of synapse
-        #   can use it instead. If we want to catch SIGHUP anywhere
-        #   else as well, I'd suggest we find a nicer way to broadcast
-        #   it around.
-        if getattr(signal, "SIGHUP"):
-            signal.signal(signal.SIGHUP, sighup)
     else:
         handler = logging.StreamHandler()
         handler.setFormatter(formatter)
@@ -176,8 +182,38 @@ def setup_logging(log_config=None, log_file=None, verbosity=None):

         logger.addHandler(handler)
     else:
-        with open(log_config, 'r') as f:
-            logging.config.dictConfig(yaml.load(f))
+        def load_log_config():
+            with open(log_config, 'r') as f:
+                logging.config.dictConfig(yaml.load(f))

-    observer = PythonLoggingObserver()
-    observer.start()
+        def sighup(signum, stack):
+            # it might be better to use a file watcher or something for this.
+            logging.info("Reloading log config from %s due to SIGHUP",
+                         log_config)
+            load_log_config()
+
+        load_log_config()
+
+    # TODO(paul): obviously this is a terrible mechanism for
+    #   stealing SIGHUP, because it means no other part of synapse
+    #   can use it instead. If we want to catch SIGHUP anywhere
+    #   else as well, I'd suggest we find a nicer way to broadcast
+    #   it around.
+    if getattr(signal, "SIGHUP"):
+        signal.signal(signal.SIGHUP, sighup)
+
+    # It's critical to point twisted's internal logging somewhere, otherwise it
+    # stacks up and leaks kup to 64K object;
+    # see: https://twistedmatrix.com/trac/ticket/8164
+    #
+    # Routing to the python logging framework could be a performance problem if
+    # the handlers blocked for a long time as python.logging is a blocking API
+    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
+    # filed as https://github.com/matrix-org/synapse/issues/1727
+    #
+    # However this may not be too much of a problem if we are just writing to a file.
+    observer = STDLibLogObserver()
+    globalLogBeginner.beginLoggingTo(
+        [observer],
+        redirectStandardIO=not config.no_redirect_stdio,
+    )
synapse/config/password_auth_providers.py (new file, 70 lines)
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Openmarket
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config, ConfigError
+
+from synapse.util.module_loader import load_module
+
+
+class PasswordAuthProviderConfig(Config):
+    def read_config(self, config):
+        self.password_providers = []
+
+        provider_config = None
+
+        # We want to be backwards compatible with the old `ldap_config`
+        # param.
+        ldap_config = config.get("ldap_config", {})
+        self.ldap_enabled = ldap_config.get("enabled", False)
+        if self.ldap_enabled:
+            from ldap_auth_provider import LdapAuthProvider
+            parsed_config = LdapAuthProvider.parse_config(ldap_config)
+            self.password_providers.append((LdapAuthProvider, parsed_config))
+
+        providers = config.get("password_providers", [])
+        for provider in providers:
+            # This is for backwards compat when the ldap auth provider resided
+            # in this package.
+            if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider":
+                from ldap_auth_provider import LdapAuthProvider
+                provider_class = LdapAuthProvider
+                try:
+                    provider_config = provider_class.parse_config(provider["config"])
+                except Exception as e:
+                    raise ConfigError(
+                        "Failed to parse config for %r: %r" % (provider['module'], e)
+                    )
+            else:
+                (provider_class, provider_config) = load_module(provider)
+
+            self.password_providers.append((provider_class, provider_config))
+
+    def default_config(self, **kwargs):
+        return """\
+        # password_providers:
+        #     - module: "ldap_auth_provider.LdapAuthProvider"
+        #       config:
+        #         enabled: true
+        #         uri: "ldap://ldap.example.com:389"
+        #         start_tls: true
+        #         base: "ou=users,dc=example,dc=com"
+        #         attributes:
+        #            uid: "cn"
+        #            mail: "email"
+        #            name: "givenName"
+        #         #bind_dn:
+        #         #bind_password:
+        #         #filter: "(objectClass=posixAccount)"
+        """
synapse/config/push.py (new file, 45 lines)
@@ -0,0 +1,45 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class PushConfig(Config):
    def read_config(self, config):
        self.push_redact_content = False

        push_config = config.get("email", {})
        self.push_redact_content = push_config.get("redact_content", False)

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
        # Control how push messages are sent to google/apple to notifications.
        # Normally every message said in a room with one or more people using
        # mobile devices will be posted to a push server hosted by matrix.org
        # which is registered with google and apple in order to allow push
        # notifications to be sent to these mobile devices.
        #
        # Setting redact_content to true will make the push messages contain no
        # message content which will provide increased privacy. This is a
        # temporary solution pending improvements to Android and iPhone apps
        # to get content from the app rather than the notification.
        #
        # For modern android devices the notification content will still appear
        # because it is loaded by the app. iPhone, however will send a
        # notification saying only that a message arrived and who it came from.
        #
        #push:
        #   redact_content: false
        """
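A short sketch of how this class consumes a parsed homeserver config dict, assuming the Config base class can be instantiated standalone. Note that, as written above, read_config looks the flag up under the "email" section rather than the "push" section shown in the sample YAML, so a redact_content value placed under "push" is not consulted by this version:

# Minimal usage sketch for PushConfig.read_config as written above.
from synapse.config.push import PushConfig

cfg = PushConfig()
cfg.read_config({"email": {"redact_content": True}})
print(cfg.push_redact_content)   # True

cfg.read_config({"push": {"redact_content": True}})
print(cfg.push_redact_content)   # False - the "push" section is ignored here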
synapse/config/registration.py
@@ -32,7 +32,6 @@ class RegistrationConfig(Config):
        )

        self.registration_shared_secret = config.get("registration_shared_secret")
-        self.user_creation_max_duration = int(config["user_creation_max_duration"])

        self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
        self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
@@ -42,6 +41,8 @@ class RegistrationConfig(Config):
            self.allow_guest_access and config.get("invite_3pid_guest", False)
        )

+        self.auto_join_rooms = config.get("auto_join_rooms", [])
+
    def default_config(self, **kwargs):
        registration_shared_secret = random_string_with_symbols(50)

@@ -55,11 +56,6 @@ class RegistrationConfig(Config):
        # secret, even if registration is otherwise disabled.
        registration_shared_secret: "%(registration_shared_secret)s"

-        # Sets the expiry for the short term user creation in
-        # milliseconds. For instance the bellow duration is two weeks
-        # in milliseconds.
-        user_creation_max_duration: 1209600000
-
        # Set the number of bcrypt rounds used to generate password hash.
        # Larger numbers increase the work factor needed to generate the hash.
        # The default number of rounds is 12.
@@ -75,6 +71,12 @@ class RegistrationConfig(Config):
        trusted_third_party_id_servers:
            - matrix.org
            - vector.im
+            - riot.im
+
+        # Users who register on this homeserver will automatically be joined
+        # to these rooms
+        #auto_join_rooms:
+        #    - "#example:example.com"
        """ % locals()

    def add_arguments(self, parser):
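After read_config, the new auto_join_rooms option is simply a list of room aliases (empty by default). A small sketch of how it surfaces to the rest of the code base; the join step is paraphrased here, not Synapse's actual registration handler:

config = {
    "registration_shared_secret": "secret",
    "bcrypt_rounds": 12,
    "trusted_third_party_id_servers": ["matrix.org", "vector.im", "riot.im"],
    "auto_join_rooms": ["#example:example.com"],
}

for alias in config.get("auto_join_rooms", []):
    # a registration handler would resolve the alias and join the new user here
    print("would auto-join", alias)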
synapse/config/repository.py
@@ -70,7 +70,19 @@ class ContentRepositoryConfig(Config):
        self.max_upload_size = self.parse_size(config["max_upload_size"])
        self.max_image_pixels = self.parse_size(config["max_image_pixels"])
        self.max_spider_size = self.parse_size(config["max_spider_size"])

        self.media_store_path = self.ensure_directory(config["media_store_path"])

+        self.backup_media_store_path = config.get("backup_media_store_path")
+        if self.backup_media_store_path:
+            self.backup_media_store_path = self.ensure_directory(
+                self.backup_media_store_path
+            )
+
+        self.synchronous_backup_media_store = config.get(
+            "synchronous_backup_media_store", False
+        )
+
        self.uploads_path = self.ensure_directory(config["uploads_path"])
        self.dynamic_thumbnails = config["dynamic_thumbnails"]
        self.thumbnail_requirements = parse_thumbnail_requirements(
@@ -115,6 +127,14 @@ class ContentRepositoryConfig(Config):
        # Directory where uploaded images and attachments are stored.
        media_store_path: "%(media_store)s"

+        # A secondary directory where uploaded images and attachments are
+        # stored as a backup.
+        # backup_media_store_path: "%(media_store)s"
+
+        # Whether to wait for successful write to backup media store before
+        # returning successfully.
+        # synchronous_backup_media_store: false
+
        # Directory where in-progress uploads are stored.
        uploads_path: "%(uploads_path)s"

@@ -167,6 +187,8 @@ class ContentRepositoryConfig(Config):
        # - '10.0.0.0/8'
        # - '172.16.0.0/12'
        # - '192.168.0.0/16'
+        # - '100.64.0.0/10'
+        # - '169.254.0.0/16'
        #
        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
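The two new options imply a write path where every upload lands in the primary store and is then mirrored to the backup store either before the request returns (synchronous) or in the background. A stand-alone sketch of those semantics only; Synapse's media repository implements this elsewhere and with Deferreds rather than threads:

import shutil
import threading


def backup_media(primary_path, backup_path, synchronous_backup):
    # Illustration of backup_media_store_path / synchronous_backup_media_store.
    if not backup_path:
        return
    if synchronous_backup:
        # synchronous_backup_media_store: true - block until the copy completes
        shutil.copyfile(primary_path, backup_path)
    else:
        # default: copy in the background and return immediately
        threading.Thread(
            target=shutil.copyfile, args=(primary_path, backup_path)
        ).start()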
synapse/config/server.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,7 +30,24 @@ class ServerConfig(Config):
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")
-        self.secondary_directory_servers = config.get("secondary_directory_servers", [])
+        self.cpu_affinity = config.get("cpu_affinity")
+
+        # Whether to send federation traffic out in this process. This only
+        # applies to some federation traffic, and so shouldn't be used to
+        # "disable" federation
+        self.send_federation = config.get("send_federation", True)
+
+        # Whether to update the user directory or not. This should be set to
+        # false only if we are updating the user directory in a worker
+        self.update_user_directory = config.get("update_user_directory", True)
+
+        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)
+
+        # Whether we should block invites sent to users on this server
+        # (other than those sent by local server admins)
+        self.block_non_admin_invites = config.get(
+            "block_non_admin_invites", False,
+        )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != '/':
@@ -38,6 +56,15 @@ class ServerConfig(Config):

        self.listeners = config.get("listeners", [])

+        for listener in self.listeners:
+            bind_address = listener.pop("bind_address", None)
+            bind_addresses = listener.setdefault("bind_addresses", [])
+
+            if bind_address:
+                bind_addresses.append(bind_address)
+            elif not bind_addresses:
+                bind_addresses.append('')
+
        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

        bind_port = config.get("bind_port")
@@ -50,7 +77,7 @@ class ServerConfig(Config):

            self.listeners.append({
                "port": bind_port,
-                "bind_address": bind_host,
+                "bind_addresses": [bind_host],
                "tls": True,
                "type": "http",
                "resources": [
@@ -69,7 +96,7 @@ class ServerConfig(Config):
        if unsecure_port:
            self.listeners.append({
                "port": unsecure_port,
-                "bind_address": bind_host,
+                "bind_addresses": [bind_host],
                "tls": False,
                "type": "http",
                "resources": [
@@ -88,7 +115,7 @@ class ServerConfig(Config):
        if manhole:
            self.listeners.append({
                "port": manhole,
-                "bind_address": "127.0.0.1",
+                "bind_addresses": ["127.0.0.1"],
                "type": "manhole",
            })

@@ -96,7 +123,7 @@ class ServerConfig(Config):
        if metrics_port:
            self.listeners.append({
                "port": metrics_port,
-                "bind_address": config.get("metrics_bind_host", "127.0.0.1"),
+                "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                "tls": False,
                "type": "http",
                "resources": [
@@ -128,9 +155,36 @@ class ServerConfig(Config):
        # When running as a daemon, the file to store the pid in
        pid_file: %(pid_file)s

+        # CPU affinity mask. Setting this restricts the CPUs on which the
+        # process will be scheduled. It is represented as a bitmask, with the
+        # lowest order bit corresponding to the first logical CPU and the
+        # highest order bit corresponding to the last logical CPU. Not all CPUs
+        # may exist on a given system but a mask may specify more CPUs than are
+        # present.
+        #
+        # For example:
+        #    0x00000001 is processor #0,
+        #    0x00000003 is processors #0 and #1,
+        #    0xFFFFFFFF is all processors (#0 through #31).
+        #
+        # Pinning a Python process to a single CPU is desirable, because Python
+        # is inherently single-threaded due to the GIL, and can suffer a
+        # 30-40%% slowdown due to cache blow-out and thread context switching
+        # if the scheduler happens to schedule the underlying threads across
+        # different cores. See
+        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
+        #
+        # cpu_affinity: 0xFFFFFFFF
+
        # Whether to serve a web client from the HTTP/HTTPS root resource.
        web_client: True

+        # The root directory to server for the above web client.
+        # If left undefined, synapse will serve the matrix-angular-sdk web client.
+        # Make sure matrix-angular-sdk is installed with pip if web_client is True
+        # and web_client_location is undefined
+        # web_client_location: "/path/to/web/root"
+
        # The public-facing base URL for the client API (not including _matrix/...)
        # public_baseurl: https://example.com:8448/

@@ -142,13 +196,13 @@ class ServerConfig(Config):
        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        # gc_thresholds: [700, 10, 10]

-        # A list of other Home Servers to fetch the public room directory from
-        # and include in the public room directory of this home server
-        # This is a temporary stopgap solution to populate new server with a
-        # list of rooms until there exists a good solution of a decentralized
-        # room directory.
-        # secondary_directory_servers:
-        #     - matrix.org
+        # Set the limit on the returned events in the timeline in the get
+        # and sync operations. The default value is -1, means no upper limit.
+        # filter_timeline_limit: 5000
+
+        # Whether room invites to users on this server should be blocked
+        # (except those sent by local server admins). The default is False.
+        # block_non_admin_invites: True

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
@@ -159,9 +213,14 @@ class ServerConfig(Config):
          # The port to listen for HTTPS requests on.
        - port: %(bind_port)s

-          # Local interface to listen on.
-          # The empty string will cause synapse to listen on all interfaces.
-          bind_address: ''
+          # Local addresses to listen on.
+          # This will listen on all IPv4 addresses by default.
+          bind_addresses:
+            - '0.0.0.0'
+            # Uncomment to listen on all IPv6 interfaces
+            # N.B: On at least Linux this will also listen on all IPv4
+            # addresses, so you will need to comment out the line above.
+            # - '::'

          # This is a 'http' listener, allows us to specify 'resources'.
          type: http
@@ -192,7 +251,7 @@ class ServerConfig(Config):
        # For when matrix traffic passes through loadbalancer that unwraps TLS.
        - port: %(unsecure_port)s
          tls: false
-          bind_address: ''
+          bind_addresses: ['0.0.0.0']
          type: http

          x_forwarded: false
@@ -244,7 +303,7 @@ def read_gc_thresholds(thresholds):
        return (
            int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
        )
-    except:
+    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )
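The cpu_affinity comment above describes a plain bitmask over logical CPUs. A minimal sketch of turning such a mask into a CPU set and applying it with os.sched_setaffinity (Python 3 on Linux); Synapse's own start-up code may apply the mask differently, so this illustrates the mask format rather than the implementation:

import os


def cpus_from_mask(mask):
    # 0x00000001 -> {0}, 0x00000003 -> {0, 1}, 0xFFFFFFFF -> {0, ..., 31}
    return {bit for bit in range(mask.bit_length()) if mask & (1 << bit)}


if __name__ == "__main__":
    mask = 0x00000003
    cpus = cpus_from_mask(mask)
    print(sorted(cpus))            # [0, 1]
    os.sched_setaffinity(0, cpus)  # pin the current process (Linux only)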
synapse/config/spam_checker.py (new file, 35 lines)
@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util.module_loader import load_module

from ._base import Config


class SpamCheckerConfig(Config):
    def read_config(self, config):
        self.spam_checker = None

        provider = config.get("spam_checker", None)
        if provider is not None:
            self.spam_checker = load_module(provider)

    def default_config(self, **kwargs):
        return """\
        # spam_checker:
        #     module: "my_custom_project.SuperSpamChecker"
        #     config:
        #         example_option: 'things'
        """
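load_module returns the named class together with its parsed config. A minimal sketch of what a module referenced by the sample config might look like; the check_event_for_spam callback name matches the spam-checker interface Synapse grew around this time, but treat the method names and the dict-shaped event as assumptions rather than a documented contract:

# my_custom_project.py - hypothetical module named in the sample config above.


class SuperSpamChecker(object):
    def __init__(self, config):
        self.example_option = config.get("example_option")

    @staticmethod
    def parse_config(config):
        # Whatever is returned here is handed back to __init__ by the loader.
        return config

    def check_event_for_spam(self, event):
        # Return True to reject the event as spam (event treated as a dict here).
        body = event.get("content", {}).get("body", "")
        return "buy cheap widgets" in body.lower()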
synapse/config/tls.py
@@ -19,6 +19,9 @@ from OpenSSL import crypto
import subprocess
import os

+from hashlib import sha256
+from unpaddedbase64 import encode_base64
+
GENERATE_DH_PARAMS = False


@@ -42,6 +45,19 @@ class TlsConfig(Config):
            config.get("tls_dh_params_path"), "tls_dh_params"
        )

+        self.tls_fingerprints = config["tls_fingerprints"]
+
+        # Check that our own certificate is included in the list of fingerprints
+        # and include it if it is not.
+        x509_certificate_bytes = crypto.dump_certificate(
+            crypto.FILETYPE_ASN1,
+            self.tls_certificate
+        )
+        sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
+        sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints)
+        if sha256_fingerprint not in sha256_fingerprints:
+            self.tls_fingerprints.append({u"sha256": sha256_fingerprint})
+
        # This config option applies to non-federation HTTP clients
        # (e.g. for talking to recaptcha, identity servers, and such)
        # It should never be used in production, and is intended for
@@ -73,6 +89,28 @@ class TlsConfig(Config):

        # Don't bind to the https port
        no_tls: False
+
+        # List of allowed TLS fingerprints for this server to publish along
+        # with the signing keys for this server. Other matrix servers that
+        # make HTTPS requests to this server will check that the TLS
+        # certificates returned by this server match one of the fingerprints.
+        #
+        # Synapse automatically adds the fingerprint of its own certificate
+        # to the list. So if federation traffic is handle directly by synapse
+        # then no modification to the list is required.
+        #
+        # If synapse is run behind a load balancer that handles the TLS then it
+        # will be necessary to add the fingerprints of the certificates used by
+        # the loadbalancers to this list if they are different to the one
+        # synapse is using.
+        #
+        # Homeservers are permitted to cache the list of TLS fingerprints
+        # returned in the key responses up to the "valid_until_ts" returned in
+        # key. It may be necessary to publish the fingerprints of a new
+        # certificate and wait until the "valid_until_ts" of the previous key
+        # responses have passed before deploying it.
+        tls_fingerprints: []
+        # tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
        """ % locals()

    def read_tls_certificate(self, cert_path):
@@ -88,7 +126,7 @@ class TlsConfig(Config):
        tls_private_key_path = config["tls_private_key_path"]
        tls_dh_params_path = config["tls_dh_params_path"]

-        if not os.path.exists(tls_private_key_path):
+        if not self.path_exists(tls_private_key_path):
            with open(tls_private_key_path, "w") as private_key_file:
                tls_private_key = crypto.PKey()
                tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
@@ -103,7 +141,7 @@ class TlsConfig(Config):
                    crypto.FILETYPE_PEM, private_key_pem
                )

-        if not os.path.exists(tls_certificate_path):
+        if not self.path_exists(tls_certificate_path):
            with open(tls_certificate_path, "w") as certificate_file:
                cert = crypto.X509()
                subject = cert.get_subject()
@@ -121,7 +159,7 @@ class TlsConfig(Config):

                certificate_file.write(cert_pem)

-        if not os.path.exists(tls_dh_params_path):
+        if not self.path_exists(tls_dh_params_path):
            if GENERATE_DH_PARAMS:
                subprocess.check_call([
                    "openssl", "dhparam",
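The fingerprint read_config appends is the unpadded-base64 SHA-256 of the DER-encoded certificate. The same computation can be run standalone against a PEM file, e.g. to publish the fingerprint of a certificate terminated on a load balancer; this sketch uses the same pyOpenSSL and unpaddedbase64 calls as the diff above:

from hashlib import sha256

from OpenSSL import crypto
from unpaddedbase64 import encode_base64


def fingerprint_of_pem(pem_path):
    with open(pem_path, "rb") as f:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    der_bytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
    return encode_base64(sha256(der_bytes).digest())


# print(fingerprint_of_pem("/path/to/loadbalancer-cert.pem"))
# -> paste the value into: tls_fingerprints: [{"sha256": "<value>"}]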
synapse/config/voip.py
@@ -19,8 +19,11 @@ class VoipConfig(Config):

    def read_config(self, config):
        self.turn_uris = config.get("turn_uris", [])
-        self.turn_shared_secret = config["turn_shared_secret"]
+        self.turn_shared_secret = config.get("turn_shared_secret")
+        self.turn_username = config.get("turn_username")
+        self.turn_password = config.get("turn_password")
        self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
+        self.turn_allow_guests = config.get("turn_allow_guests", True)

    def default_config(self, **kwargs):
        return """\
@@ -32,6 +35,18 @@ class VoipConfig(Config):
        # The shared secret used to compute passwords for the TURN server
        turn_shared_secret: "YOUR_SHARED_SECRET"

+        # The Username and password if the TURN server needs them and
+        # does not use a token
+        #turn_username: "TURNSERVER_USERNAME"
+        #turn_password: "TURNSERVER_PASSWORD"
+
        # How long generated TURN credentials last
        turn_user_lifetime: "1h"
+
+        # Whether guests should be allowed to use the TURN server.
+        # This defaults to True, otherwise VoIP will be unreliable for guests.
+        # However, it does introduce a slight security risk as it allows users to
+        # connect to arbitrary endpoints without having first signed up for a
+        # valid account (e.g. by passing a CAPTCHA).
+        turn_allow_guests: True
        """
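For background, the turn_shared_secret path follows the common TURN REST credential scheme: a time-limited username and an HMAC-SHA1 password derived from the shared secret, with turn_user_lifetime bounding the expiry. A sketch of that general scheme; the exact username format Synapse emits is not shown in this diff and is an assumption here:

import base64
import hashlib
import hmac
import time


def turn_credentials(shared_secret, user_id, lifetime_seconds=3600):
    # Username carries the expiry; password = HMAC-SHA1(secret, username).
    expiry = int(time.time()) + lifetime_seconds
    username = "%d:%s" % (expiry, user_id)
    mac = hmac.new(shared_secret.encode("utf8"), username.encode("utf8"), hashlib.sha1)
    password = base64.b64encode(mac.digest()).decode("ascii")
    return username, password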
synapse/config/workers.py
@@ -28,4 +28,19 @@ class WorkerConfig(Config):
        self.worker_pid_file = config.get("worker_pid_file")
        self.worker_log_file = config.get("worker_log_file")
        self.worker_log_config = config.get("worker_log_config")
-        self.worker_replication_url = config.get("worker_replication_url")
+        self.worker_replication_host = config.get("worker_replication_host", None)
+        self.worker_replication_port = config.get("worker_replication_port", None)
+        self.worker_name = config.get("worker_name", self.worker_app)
+
+        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
+        self.worker_cpu_affinity = config.get("worker_cpu_affinity")
+
+        if self.worker_listeners:
+            for listener in self.worker_listeners:
+                bind_address = listener.pop("bind_address", None)
+                bind_addresses = listener.setdefault("bind_addresses", [])
+
+                if bind_address:
+                    bind_addresses.append(bind_address)
+                elif not bind_addresses:
+                    bind_addresses.append('')
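The same bind_address to bind_addresses normalisation appears here as in the server config: a legacy scalar is folded into the newer list form. A stand-alone sketch of that normalisation (copied from the diff logic), plus a worker config fragment using the new replication host/port options in place of worker_replication_url; the values are illustrative only:

def normalise_listener(listener):
    # Fold a legacy "bind_address" value into the newer "bind_addresses" list.
    bind_address = listener.pop("bind_address", None)
    bind_addresses = listener.setdefault("bind_addresses", [])
    if bind_address:
        bind_addresses.append(bind_address)
    elif not bind_addresses:
        bind_addresses.append('')
    return listener


print(normalise_listener({"port": 8083, "bind_address": "127.0.0.1"}))
# {'port': 8083, 'bind_addresses': ['127.0.0.1']}

worker_config = {
    "worker_app": "synapse.app.synchrotron",
    "worker_replication_host": "127.0.0.1",   # replaces worker_replication_url
    "worker_replication_port": 9092,          # illustrative port
}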
synapse/crypto/context_factory.py
@@ -34,7 +34,7 @@ class ServerContextFactory(ssl.ContextFactory):
        try:
            _ecCurve = _OpenSSLECCurve(_defaultCurveName)
            _ecCurve.addECKeyToContext(context)
-        except:
+        except Exception:
            logger.exception("Failed to enable elliptic curve for TLS")
        context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
        context.use_certificate_chain_file(config.tls_certificate_file)
synapse/crypto/event_signing.py
@@ -43,7 +43,7 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
    message_hash_base64 = event.hashes[name]
    try:
        message_hash_bytes = decode_base64(message_hash_base64)
-    except:
+    except Exception:
        raise SynapseError(
            400,
            "Invalid base64: %s" % (message_hash_base64,),
synapse/crypto/keyclient.py
@@ -13,14 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+from synapse.util import logcontext
from twisted.web.http import HTTPClient
from twisted.internet.protocol import Factory
from twisted.internet import defer, reactor
from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util.logcontext import (
-    preserve_context_over_fn, preserve_context_over_deferred
-)
import simplejson as json
import logging

@@ -43,14 +40,10 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):

    for i in range(5):
        try:
-            protocol = yield preserve_context_over_fn(
-                endpoint.connect, factory
-            )
-            server_response, server_certificate = yield preserve_context_over_deferred(
-                protocol.remote_key
-            )
+            with logcontext.PreserveLoggingContext():
+                protocol = yield endpoint.connect(factory)
+                server_response, server_certificate = yield protocol.remote_key
            defer.returnValue((server_response, server_certificate))
-            return
        except SynapseKeyClientError as e:
            logger.exception("Error getting key for %r" % (server_name,))
            if e.status.startswith("4"):
@@ -77,10 +70,12 @@ class SynapseKeyClientProtocol(HTTPClient):
    def __init__(self):
        self.remote_key = defer.Deferred()
        self.host = None
+        self._peer = None

    def connectionMade(self):
-        self.host = self.transport.getHost()
-        logger.debug("Connected to %s", self.host)
+        self._peer = self.transport.getPeer()
+        logger.debug("Connected to %s", self._peer)

        self.sendCommand(b"GET", self.path)
        if self.host:
            self.sendHeader(b"Host", self.host)
@@ -124,7 +119,10 @@ class SynapseKeyClientProtocol(HTTPClient):
            self.timer.cancel()

    def on_timeout(self):
-        logger.debug("Timeout waiting for response from %s", self.host)
+        logger.debug(
+            "Timeout waiting for response from %s: %s",
+            self.host, self._peer,
+        )
        self.errback(IOError("Timeout waiting for response"))
        self.transport.abortConnection()

@@ -133,4 +131,5 @@ class SynapseKeyClientFactory(Factory):
    def protocol(self):
        protocol = SynapseKeyClientProtocol()
        protocol.path = self.path
+        protocol.host = self.host
        return protocol
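The keyclient change replaces preserve_context_over_fn with an explicit PreserveLoggingContext block around the yields, so the wait happens in the sentinel logcontext. A compressed sketch of that pattern, mirroring the lines in the diff above; endpoint and factory stand in for the real objects:

from twisted.internet import defer

from synapse.util import logcontext


@defer.inlineCallbacks
def connect_outside_logcontext(endpoint, factory):
    # Yielding a deferred created outside our logcontext: drop into the
    # sentinel context for the duration of the wait, as the diff does.
    with logcontext.PreserveLoggingContext():
        protocol = yield endpoint.connect(factory)
    defer.returnValue(protocol)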
synapse/crypto/keyring.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,13 +16,12 @@

from synapse.crypto.keyclient import fetch_server_key
from synapse.api.errors import SynapseError, Codes
-from synapse.util.retryutils import get_retry_limiter
-from synapse.util import unwrapFirstError
-from synapse.util.async import ObservableDeferred
+from synapse.util import unwrapFirstError, logcontext
from synapse.util.logcontext import (
-    preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
+    PreserveLoggingContext,
    preserve_fn
)
+from synapse.util.metrics import Measure

from twisted.internet import defer

@@ -44,7 +44,26 @@
logger = logging.getLogger(__name__)


-KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
+VerifyKeyRequest = namedtuple("VerifyRequest", (
+    "server_name", "key_ids", "json_object", "deferred"
+))
+"""
+A request for a verify key to verify a JSON object.
+
+Attributes:
+    server_name(str): The name of the server to verify against.
+    key_ids(set(str)): The set of key_ids to that could be used to verify the
+        JSON object
+    json_object(dict): The JSON object to verify.
+    deferred(twisted.internet.defer.Deferred):
+        A deferred (server_name, key_id, verify_key) tuple that resolves when
+        a verify key has been fetched. The deferreds' callbacks are run with no
+        logcontext.
+"""
+
+
+class KeyLookupError(ValueError):
+    pass


class Keyring(object):
@@ -56,138 +75,123 @@ class Keyring(object):
        self.perspective_servers = self.config.perspectives
        self.hs = hs

+        # map from server name to Deferred. Has an entry for each server with
+        # an ongoing key download; the Deferred completes once the download
+        # completes.
+        #
+        # These are regular, logcontext-agnostic Deferreds.
        self.key_downloads = {}

    def verify_json_for_server(self, server_name, json_object):
-        return self.verify_json_objects_for_server(
+        return logcontext.make_deferred_yieldable(
+            self.verify_json_objects_for_server(
                [(server_name, json_object)]
            )[0]
+        )

    def verify_json_objects_for_server(self, server_and_json):
-        """Bulk verfies signatures of json objects, bulk fetching keys as
+        """Bulk verifies signatures of json objects, bulk fetching keys as
        necessary.

        Args:
            server_and_json (list): List of pairs of (server_name, json_object)

        Returns:
-            list of deferreds indicating success or failure to verify each
-            json object's signature for the given server_name.
+            List<Deferred>: for each input pair, a deferred indicating success
+                or failure to verify each json object's signature for the given
+                server_name. The deferreds run their callbacks in the sentinel
+                logcontext.
        """
-        group_id_to_json = {}
-        group_id_to_group = {}
-        group_ids = []
-
-        next_group_id = 0
-        deferreds = {}
+        verify_requests = []

        for server_name, json_object in server_and_json:
-            logger.debug("Verifying for %s", server_name)
-            group_id = next_group_id
-            next_group_id += 1
-            group_ids.append(group_id)

            key_ids = signature_ids(json_object, server_name)
            if not key_ids:
-                deferreds[group_id] = defer.fail(SynapseError(
+                logger.warn("Request from %s: no supported signature keys",
+                            server_name)
+                deferred = defer.fail(SynapseError(
                    400,
                    "Not signed with a supported algorithm",
                    Codes.UNAUTHORIZED,
                ))
            else:
-                deferreds[group_id] = defer.Deferred()
+                deferred = defer.Deferred()

-            group = KeyGroup(server_name, group_id, key_ids)
+            logger.debug("Verifying for %s with key_ids %s",
+                         server_name, key_ids)

-            group_id_to_group[group_id] = group
-            group_id_to_json[group_id] = json_object
+            verify_request = VerifyKeyRequest(
+                server_name, key_ids, json_object, deferred
+            )
+
+            verify_requests.append(verify_request)
+
+        preserve_fn(self._start_key_lookups)(verify_requests)
+
+        # Pass those keys to handle_key_deferred so that the json object
+        # signatures can be verified
+        handle = preserve_fn(_handle_key_deferred)
+        return [
+            handle(rq) for rq in verify_requests
+        ]

    @defer.inlineCallbacks
-    def handle_key_deferred(group, deferred):
-        server_name = group.server_name
-        try:
-            _, _, key_id, verify_key = yield deferred
-        except IOError as e:
-            logger.warn(
-                "Got IOError when downloading keys for %s: %s %s",
-                server_name, type(e).__name__, str(e.message),
-            )
-            raise SynapseError(
-                502,
-                "Error downloading keys for %s" % (server_name,),
-                Codes.UNAUTHORIZED,
-            )
-        except Exception as e:
-            logger.exception(
-                "Got Exception when downloading keys for %s: %s %s",
-                server_name, type(e).__name__, str(e.message),
-            )
-            raise SynapseError(
-                401,
-                "No key for %s with id %s" % (server_name, key_ids),
-                Codes.UNAUTHORIZED,
-            )
-
-        json_object = group_id_to_json[group.group_id]
-
-        try:
-            verify_signed_json(json_object, server_name, verify_key)
-        except:
-            raise SynapseError(
-                401,
-                "Invalid signature for server %s with key %s:%s" % (
-                    server_name, verify_key.alg, verify_key.version
-                ),
-                Codes.UNAUTHORIZED,
-            )
+    def _start_key_lookups(self, verify_requests):
+        """Sets off the key fetches for each verify request
+
+        Once each fetch completes, verify_request.deferred will be resolved.
+
+        Args:
+            verify_requests (List[VerifyKeyRequest]):
+        """

+        # create a deferred for each server we're going to look up the keys
+        # for; we'll resolve them once we have completed our lookups.
+        # These will be passed into wait_for_previous_lookups to block
+        # any other lookups until we have finished.
+        # The deferreds are called with no logcontext.
        server_to_deferred = {
-            server_name: defer.Deferred()
-            for server_name, _ in server_and_json
+            rq.server_name: defer.Deferred()
+            for rq in verify_requests
        }

-        with PreserveLoggingContext():
-
            # We want to wait for any previous lookups to complete before
            # proceeding.
-            wait_on_deferred = self.wait_for_previous_lookups(
-                [server_name for server_name, _ in server_and_json],
-                server_to_deferred,
-            )
+        yield self.wait_for_previous_lookups(
+            [rq.server_name for rq in verify_requests],
+            server_to_deferred,
+        )

            # Actually start fetching keys.
-            wait_on_deferred.addBoth(
-                lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
-            )
+        self._get_server_verify_keys(verify_requests)

            # When we've finished fetching all the keys for a given server_name,
            # resolve the deferred passed to `wait_for_previous_lookups` so that
            # any lookups waiting will proceed.
-            server_to_gids = {}
+        #
+        # map from server name to a set of request ids
+        server_to_request_ids = {}

-            def remove_deferreds(res, server_name, group_id):
-                server_to_gids[server_name].discard(group_id)
-                if not server_to_gids[server_name]:
+        for verify_request in verify_requests:
+            server_name = verify_request.server_name
+            request_id = id(verify_request)
+            server_to_request_ids.setdefault(server_name, set()).add(request_id)
+
+        def remove_deferreds(res, verify_request):
+            server_name = verify_request.server_name
+            request_id = id(verify_request)
+            server_to_request_ids[server_name].discard(request_id)
+            if not server_to_request_ids[server_name]:
                d = server_to_deferred.pop(server_name, None)
                if d:
                    d.callback(None)
            return res

-            for g_id, deferred in deferreds.items():
-                server_name = group_id_to_group[g_id].server_name
-                server_to_gids.setdefault(server_name, set()).add(g_id)
-                deferred.addBoth(remove_deferreds, server_name, g_id)
-
-        # Pass those keys to handle_key_deferred so that the json object
-        # signatures can be verified
-        return [
-            preserve_context_over_fn(
-                handle_key_deferred,
-                group_id_to_group[g_id],
-                deferreds[g_id],
-            )
-            for g_id in group_ids
-        ]
+        for verify_request in verify_requests:
+            verify_request.deferred.addBoth(
+                remove_deferreds, verify_request,
+            )

    @defer.inlineCallbacks
    def wait_for_previous_lookups(self, server_names, server_to_deferred):
@@ -196,7 +200,13 @@ class Keyring(object):
        Args:
            server_names (list): list of server_names we want to lookup
            server_to_deferred (dict): server_name to deferred which gets
-                resolved once we've finished looking up keys for that server
+                resolved once we've finished looking up keys for that server.
+                The Deferreds should be regular twisted ones which call their
+                callbacks with no logcontext.
+
+        Returns: a Deferred which resolves once all key lookups for the given
+            servers have completed. Follows the synapse rules of logcontext
+            preservation.
        """
        while True:
            wait_on = [
@@ -210,19 +220,23 @@ class Keyring(object):
            else:
                break

-        for server_name, deferred in server_to_deferred.items():
-            d = ObservableDeferred(preserve_context_over_deferred(deferred))
-            self.key_downloads[server_name] = d
-
-            def rm(r, server_name):
-                self.key_downloads.pop(server_name, None)
-                return r
-
-            d.addBoth(rm, server_name)
+        def rm(r, server_name_):
+            self.key_downloads.pop(server_name_, None)
+            return r
+
+        for server_name, deferred in server_to_deferred.items():
+            self.key_downloads[server_name] = deferred
+            deferred.addBoth(rm, server_name)

-    def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
-        """Takes a dict of KeyGroups and tries to find at least one key for
-        each group.
+    def _get_server_verify_keys(self, verify_requests):
+        """Tries to find at least one key for each verify request
+
+        For each verify_request, verify_request.deferred is called back with
+        params (server_name, key_id, VerifyKey) if a key is found, or errbacked
+        with a SynapseError if none of the keys are found.
+
+        Args:
+            verify_requests (list[VerifyKeyRequest]): list of verify requests
        """

        # These are functions that produce keys given a list of key ids
@@ -234,76 +248,94 @@ class Keyring(object):

        @defer.inlineCallbacks
        def do_iterations():
+            with Measure(self.clock, "get_server_verify_keys"):
+                # dict[str, dict[str, VerifyKey]]: results so far.
+                # map server_name -> key_id -> VerifyKey
                merged_results = {}

+                # dict[str, set(str)]: keys to fetch for each server
                missing_keys = {}
-            for group in group_id_to_group.values():
-                missing_keys.setdefault(group.server_name, set()).update(
-                    group.key_ids
-                )
+                for verify_request in verify_requests:
+                    missing_keys.setdefault(verify_request.server_name, set()).update(
+                        verify_request.key_ids
+                    )

                for fn in key_fetch_fns:
                    results = yield fn(missing_keys.items())
                    merged_results.update(results)

-                # We now need to figure out which groups we have keys for
-                # and which we don't
-                missing_groups = {}
-                for group in group_id_to_group.values():
-                    for key_id in group.key_ids:
-                        if key_id in merged_results[group.server_name]:
+                    # We now need to figure out which verify requests we have keys
+                    # for and which we don't
+                    missing_keys = {}
+                    requests_missing_keys = []
+                    for verify_request in verify_requests:
+                        server_name = verify_request.server_name
+                        result_keys = merged_results[server_name]
+
+                        if verify_request.deferred.called:
+                            # We've already called this deferred, which probably
+                            # means that we've already found a key for it.
+                            continue
+
+                        for key_id in verify_request.key_ids:
+                            if key_id in result_keys:
                                with PreserveLoggingContext():
-                                group_id_to_deferred[group.group_id].callback((
-                                    group.group_id,
-                                    group.server_name,
-                                    key_id,
-                                    merged_results[group.server_name][key_id],
-                                ))
+                                    verify_request.deferred.callback((
+                                        server_name,
+                                        key_id,
+                                        result_keys[key_id],
+                                    ))
                                break
                        else:
-                            missing_groups.setdefault(
-                                group.server_name, []
-                            ).append(group)
+                            # The else block is only reached if the loop above
+                            # doesn't break.
+                            missing_keys.setdefault(server_name, set()).update(
+                                verify_request.key_ids
+                            )
+                            requests_missing_keys.append(verify_request)

-                if not missing_groups:
+                    if not missing_keys:
                        break

-                missing_keys = {
-                    server_name: set(
-                        key_id for group in groups for key_id in group.key_ids
-                    )
-                    for server_name, groups in missing_groups.items()
-                }
-
-                for group in missing_groups.values():
-                    group_id_to_deferred[group.group_id].errback(SynapseError(
-                        401,
-                        "No key for %s with id %s" % (
-                            group.server_name, group.key_ids,
-                        ),
-                        Codes.UNAUTHORIZED,
-                    ))
+                with PreserveLoggingContext():
+                    for verify_request in requests_missing_keys:
+                        verify_request.deferred.errback(SynapseError(
+                            401,
+                            "No key for %s with id %s" % (
+                                verify_request.server_name, verify_request.key_ids,
+                            ),
+                            Codes.UNAUTHORIZED,
+                        ))

        def on_err(err):
-            for deferred in group_id_to_deferred.values():
-                if not deferred.called:
-                    deferred.errback(err)
+            with PreserveLoggingContext():
+                for verify_request in verify_requests:
+                    if not verify_request.deferred.called:
+                        verify_request.deferred.errback(err)

-        do_iterations().addErrback(on_err)
-
-        return group_id_to_deferred
+        preserve_fn(do_iterations)().addErrback(on_err)

    @defer.inlineCallbacks
    def get_keys_from_store(self, server_name_and_key_ids):
-        res = yield defer.gatherResults(
+        """
+
+        Args:
+            server_name_and_key_ids (list[(str, iterable[str])]):
+                list of (server_name, iterable[key_id]) tuples to fetch keys for
+
+        Returns:
+            Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from
+                server_name -> key_id -> VerifyKey
+        """
+        res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                self.store.get_server_verify_keys(
+                preserve_fn(self.store.get_server_verify_keys)(
                    server_name, key_ids
                ).addCallback(lambda ks, server: (server, ks), server_name)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

        defer.returnValue(dict(res))

@@ -324,13 +356,13 @@ class Keyring(object):
            )
            defer.returnValue({})

-        results = yield defer.gatherResults(
+        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                get_key(p_name, p_keys)
+                preserve_fn(get_key)(p_name, p_keys)
                for p_name, p_keys in self.perspective_servers.items()
            ],
            consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

        union_of_keys = {}
        for result in results:
@@ -343,12 +375,6 @@ class Keyring(object):
    def get_keys_from_server(self, server_name_and_key_ids):
        @defer.inlineCallbacks
        def get_key(server_name, key_ids):
-            limiter = yield get_retry_limiter(
-                server_name,
-                self.clock,
-                self.store,
-            )
-            with limiter:
            keys = None
            try:
                keys = yield self.get_server_verify_key_v2_direct(
@@ -356,7 +382,7 @@ class Keyring(object):
                )
            except Exception as e:
                logger.info(
-                    "Unable to getting key %r for %r directly: %s %s",
+                    "Unable to get key %r for %r directly: %s %s",
                    key_ids, server_name,
                    type(e).__name__, str(e.message),
                )
@@ -370,13 +396,13 @@ class Keyring(object):

            defer.returnValue(keys)

-        results = yield defer.gatherResults(
+        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                get_key(server_name, key_ids)
+                preserve_fn(get_key)(server_name, key_ids)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

        merged = {}
        for result in results:
@@ -418,7 +444,7 @@ class Keyring(object):
        for response in responses:
            if (u"signatures" not in response
                    or perspective_name not in response[u"signatures"]):
-                raise ValueError(
+                raise KeyLookupError(
                    "Key response not signed by perspective server"
                    " %r" % (perspective_name,)
                )
@@ -441,21 +467,21 @@ class Keyring(object):
                    list(response[u"signatures"][perspective_name]),
                    list(perspective_keys)
                )
-                raise ValueError(
+                raise KeyLookupError(
                    "Response not signed with a known key for perspective"
                    " server %r" % (perspective_name,)
                )

            processed_response = yield self.process_v2_response(
-                perspective_name, response
+                perspective_name, response, only_from_server=False
            )

            for server_name, response_keys in processed_response.items():
                keys.setdefault(server_name, {}).update(response_keys)

-        yield defer.gatherResults(
+        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                self.store_keys(
+                preserve_fn(self.store_keys)(
                    server_name=server_name,
                    from_server=perspective_name,
                    verify_keys=response_keys,
@@ -463,7 +489,7 @@ class Keyring(object):
                for server_name, response_keys in keys.items()
            ],
            consumeErrors=True
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

        defer.returnValue(keys)

@@ -484,10 +510,10 @@ class Keyring(object):

        if (u"signatures" not in response
                or server_name not in response[u"signatures"]):
-            raise ValueError("Key response not signed by remote server")
+            raise KeyLookupError("Key response not signed by remote server")

        if "tls_fingerprints" not in response:
-            raise ValueError("Key response missing TLS fingerprints")
+            raise KeyLookupError("Key response missing TLS fingerprints")

        certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1, tls_certificate
@@ -501,7 +527,7 @@ class Keyring(object):
            response_sha256_fingerprints.add(fingerprint[u"sha256"])

        if sha256_fingerprint_b64 not in response_sha256_fingerprints:
-            raise ValueError("TLS certificate not allowed by fingerprints")
+            raise KeyLookupError("TLS certificate not allowed by fingerprints")

        response_keys = yield self.process_v2_response(
            from_server=server_name,
@@ -511,7 +537,7 @@ class Keyring(object):

        keys.update(response_keys)

-        yield defer.gatherResults(
+        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(self.store_keys)(
                    server_name=key_server_name,
@@ -521,13 +547,13 @@ class Keyring(object):
                for key_server_name, verify_keys in keys.items()
            ],
            consumeErrors=True
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

        defer.returnValue(keys)

    @defer.inlineCallbacks
    def process_v2_response(self, from_server, response_json,
-                            requested_ids=[]):
+                            requested_ids=[], only_from_server=True):
        time_now_ms = self.clock.time_msec()
        response_keys = {}
        verify_keys = {}
@@ -551,9 +577,16 @@ class Keyring(object):

        results = {}
        server_name = response_json["server_name"]
+        if only_from_server:
+            if server_name != from_server:
+                raise KeyLookupError(
+                    "Expected a response for server %r not %r" % (
+                        from_server, server_name
+                    )
+                )
        for key_id in response_json["signatures"].get(server_name, {}):
            if key_id not in response_json["verify_keys"]:
-                raise ValueError(
+                raise KeyLookupError(
                    "Key response must include verification keys for all"
                    " signatures"
                )
@@ -580,7 +613,7 @@ class Keyring(object):
        response_keys.update(verify_keys)
        response_keys.update(old_verify_keys)

-        yield defer.gatherResults(
+        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(self.store.store_server_keys_json)(
                    server_name=server_name,
@@ -593,7 +626,7 @@ class Keyring(object):
                for key_id in updated_key_ids
            ],
            consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

        results[server_name] = response_keys

@@ -621,15 +654,15 @@ class Keyring(object):

        if ("signatures" not in response
                or server_name not in response["signatures"]):
-            raise ValueError("Key response not signed by remote server")
+            raise KeyLookupError("Key response not signed by remote server")

        if "tls_certificate" not in response:
-            raise ValueError("Key response missing TLS certificate")
+            raise KeyLookupError("Key response missing TLS certificate")

        tls_certificate_b64 = response["tls_certificate"]

        if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
-            raise ValueError("TLS certificate doesn't match")
+            raise KeyLookupError("TLS certificate doesn't match")

        # Cache the result in the datastore.

@@ -645,7 +678,7 @@ class Keyring(object):

        for key_id in response["signatures"][server_name]:
            if key_id not in response["verify_keys"]:
-                raise ValueError(
+                raise KeyLookupError(
                    "Key response must include verification keys for all"
                    " signatures"
                )
@@ -671,7 +704,6 @@ class Keyring(object):

        defer.returnValue(verify_keys)

-    @defer.inlineCallbacks
    def store_keys(self, server_name, from_server, verify_keys):
|
def store_keys(self, server_name, from_server, verify_keys):
|
||||||
"""Store a collection of verify keys for a given server
|
"""Store a collection of verify keys for a given server
|
||||||
Args:
|
Args:
|
||||||
@@ -682,7 +714,7 @@ class Keyring(object):
|
|||||||
A deferred that completes when the keys are stored.
|
A deferred that completes when the keys are stored.
|
||||||
"""
|
"""
|
||||||
# TODO(markjh): Store whether the keys have expired.
|
# TODO(markjh): Store whether the keys have expired.
|
||||||
yield defer.gatherResults(
|
return logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||||
[
|
[
|
||||||
preserve_fn(self.store.store_server_verify_key)(
|
preserve_fn(self.store.store_server_verify_key)(
|
||||||
server_name, server_name, key.time_added, key
|
server_name, server_name, key.time_added, key
|
||||||
@@ -690,4 +722,48 @@ class Keyring(object):
|
|||||||
for key_id, key in verify_keys.items()
|
for key_id, key in verify_keys.items()
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError))
|
||||||
|
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _handle_key_deferred(verify_request):
|
||||||
|
server_name = verify_request.server_name
|
||||||
|
try:
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
_, key_id, verify_key = yield verify_request.deferred
|
||||||
|
except IOError as e:
|
||||||
|
logger.warn(
|
||||||
|
"Got IOError when downloading keys for %s: %s %s",
|
||||||
|
server_name, type(e).__name__, str(e.message),
|
||||||
|
)
|
||||||
|
raise SynapseError(
|
||||||
|
502,
|
||||||
|
"Error downloading keys for %s" % (server_name,),
|
||||||
|
Codes.UNAUTHORIZED,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.exception(
|
||||||
|
"Got Exception when downloading keys for %s: %s %s",
|
||||||
|
server_name, type(e).__name__, str(e.message),
|
||||||
|
)
|
||||||
|
raise SynapseError(
|
||||||
|
401,
|
||||||
|
"No key for %s with id %s" % (server_name, verify_request.key_ids),
|
||||||
|
Codes.UNAUTHORIZED,
|
||||||
|
)
|
||||||
|
|
||||||
|
json_object = verify_request.json_object
|
||||||
|
|
||||||
|
logger.debug("Got key %s %s:%s for server %s, verifying" % (
|
||||||
|
key_id, verify_key.alg, verify_key.version, server_name,
|
||||||
|
))
|
||||||
|
try:
|
||||||
|
verify_signed_json(json_object, server_name, verify_key)
|
||||||
|
except Exception:
|
||||||
|
raise SynapseError(
|
||||||
|
401,
|
||||||
|
"Invalid signature for server %s with key %s:%s" % (
|
||||||
|
server_name, verify_key.alg, verify_key.version
|
||||||
|
),
|
||||||
|
Codes.UNAUTHORIZED,
|
||||||
|
)
|
||||||
|
|||||||
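The recurring change in the keyring code above is one pattern applied several times: each parallel call is wrapped in preserve_fn so it starts under the caller's log context, and the combined deferred is awaited via make_deferred_yieldable so the context is restored when the yield resumes. A minimal sketch of that pattern, assuming the synapse.util.logcontext helpers used in the diff; store_keys and the argument names here are illustrative only:

    from twisted.internet import defer
    from synapse.util import unwrapFirstError
    from synapse.util.logcontext import make_deferred_yieldable, preserve_fn

    @defer.inlineCallbacks
    def store_all(store_keys, keys, from_server):
        # keys: dict of server_name -> verify_keys; each server is stored in parallel.
        yield make_deferred_yieldable(defer.gatherResults(
            [
                preserve_fn(store_keys)(
                    server_name=server_name,
                    from_server=from_server,
                    verify_keys=verify_keys,
                )
                for server_name, verify_keys in keys.items()
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError))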
678  synapse/event_auth.py  Normal file
@@ -0,0 +1,678 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from canonicaljson import encode_canonical_json
|
||||||
|
from signedjson.key import decode_verify_key_bytes
|
||||||
|
from signedjson.sign import verify_signed_json, SignatureVerifyException
|
||||||
|
from unpaddedbase64 import decode_base64
|
||||||
|
|
||||||
|
from synapse.api.constants import EventTypes, Membership, JoinRules
|
||||||
|
from synapse.api.errors import AuthError, SynapseError, EventSizeError
|
||||||
|
from synapse.types import UserID, get_domain_from_id
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def check(event, auth_events, do_sig_check=True, do_size_check=True):
|
||||||
|
""" Checks if this event is correctly authed.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
event: the event being checked.
|
||||||
|
auth_events (dict: event-key -> event): the existing room state.
|
||||||
|
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if the auth checks pass.
|
||||||
|
"""
|
||||||
|
if do_size_check:
|
||||||
|
_check_size_limits(event)
|
||||||
|
|
||||||
|
if not hasattr(event, "room_id"):
|
||||||
|
raise AuthError(500, "Event has no room_id: %s" % event)
|
||||||
|
|
||||||
|
if do_sig_check:
|
||||||
|
sender_domain = get_domain_from_id(event.sender)
|
||||||
|
event_id_domain = get_domain_from_id(event.event_id)
|
||||||
|
|
||||||
|
is_invite_via_3pid = (
|
||||||
|
event.type == EventTypes.Member
|
||||||
|
and event.membership == Membership.INVITE
|
||||||
|
and "third_party_invite" in event.content
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check the sender's domain has signed the event
|
||||||
|
if not event.signatures.get(sender_domain):
|
||||||
|
# We allow invites via 3pid to have a sender from a different
|
||||||
|
# HS, as the sender must match the sender of the original
|
||||||
|
# 3pid invite. This is checked further down with the
|
||||||
|
# other dedicated membership checks.
|
||||||
|
if not is_invite_via_3pid:
|
||||||
|
raise AuthError(403, "Event not signed by sender's server")
|
||||||
|
|
||||||
|
# Check the event_id's domain has signed the event
|
||||||
|
if not event.signatures.get(event_id_domain):
|
||||||
|
raise AuthError(403, "Event not signed by sending server")
|
||||||
|
|
||||||
|
if auth_events is None:
|
||||||
|
# Oh, we don't know what the state of the room was, so we
|
||||||
|
# are trusting that this is allowed (at least for now)
|
||||||
|
logger.warn("Trusting event: %s", event.event_id)
|
||||||
|
return True
|
||||||
|
|
||||||
|
if event.type == EventTypes.Create:
|
||||||
|
room_id_domain = get_domain_from_id(event.room_id)
|
||||||
|
if room_id_domain != sender_domain:
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"Creation event's room_id domain does not match sender's"
|
||||||
|
)
|
||||||
|
# FIXME
|
||||||
|
return True
|
||||||
|
|
||||||
|
creation_event = auth_events.get((EventTypes.Create, ""), None)
|
||||||
|
|
||||||
|
if not creation_event:
|
||||||
|
raise SynapseError(
|
||||||
|
403,
|
||||||
|
"Room %r does not exist" % (event.room_id,)
|
||||||
|
)
|
||||||
|
|
||||||
|
creating_domain = get_domain_from_id(event.room_id)
|
||||||
|
originating_domain = get_domain_from_id(event.sender)
|
||||||
|
if creating_domain != originating_domain:
|
||||||
|
if not _can_federate(event, auth_events):
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"This room has been marked as unfederatable."
|
||||||
|
)
|
||||||
|
|
||||||
|
# FIXME: Temp hack
|
||||||
|
if event.type == EventTypes.Aliases:
|
||||||
|
if not event.is_state():
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"Alias event must be a state event",
|
||||||
|
)
|
||||||
|
if not event.state_key:
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"Alias event must have non-empty state_key"
|
||||||
|
)
|
||||||
|
sender_domain = get_domain_from_id(event.sender)
|
||||||
|
if event.state_key != sender_domain:
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"Alias event's state_key does not match sender's domain"
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
if logger.isEnabledFor(logging.DEBUG):
|
||||||
|
logger.debug(
|
||||||
|
"Auth events: %s",
|
||||||
|
[a.event_id for a in auth_events.values()]
|
||||||
|
)
|
||||||
|
|
||||||
|
if event.type == EventTypes.Member:
|
||||||
|
allowed = _is_membership_change_allowed(
|
||||||
|
event, auth_events
|
||||||
|
)
|
||||||
|
if allowed:
|
||||||
|
logger.debug("Allowing! %s", event)
|
||||||
|
else:
|
||||||
|
logger.debug("Denying! %s", event)
|
||||||
|
return allowed
|
||||||
|
|
||||||
|
_check_event_sender_in_room(event, auth_events)
|
||||||
|
|
||||||
|
# Special case to allow m.room.third_party_invite events wherever
|
||||||
|
# a user is allowed to issue invites. Fixes
|
||||||
|
# https://github.com/vector-im/vector-web/issues/1208 hopefully
|
||||||
|
if event.type == EventTypes.ThirdPartyInvite:
|
||||||
|
user_level = get_user_power_level(event.user_id, auth_events)
|
||||||
|
invite_level = _get_named_level(auth_events, "invite", 0)
|
||||||
|
|
||||||
|
if user_level < invite_level:
|
||||||
|
raise AuthError(
|
||||||
|
403, (
|
||||||
|
"You cannot issue a third party invite for %s." %
|
||||||
|
(event.content.display_name,)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
_can_send_event(event, auth_events)
|
||||||
|
|
||||||
|
if event.type == EventTypes.PowerLevels:
|
||||||
|
_check_power_levels(event, auth_events)
|
||||||
|
|
||||||
|
if event.type == EventTypes.Redaction:
|
||||||
|
check_redaction(event, auth_events)
|
||||||
|
|
||||||
|
logger.debug("Allowing! %s", event)
|
||||||
|
|
||||||
|
|
||||||
|
def _check_size_limits(event):
|
||||||
|
def too_big(field):
|
||||||
|
raise EventSizeError("%s too large" % (field,))
|
||||||
|
|
||||||
|
if len(event.user_id) > 255:
|
||||||
|
too_big("user_id")
|
||||||
|
if len(event.room_id) > 255:
|
||||||
|
too_big("room_id")
|
||||||
|
if event.is_state() and len(event.state_key) > 255:
|
||||||
|
too_big("state_key")
|
||||||
|
if len(event.type) > 255:
|
||||||
|
too_big("type")
|
||||||
|
if len(event.event_id) > 255:
|
||||||
|
too_big("event_id")
|
||||||
|
if len(encode_canonical_json(event.get_pdu_json())) > 65536:
|
||||||
|
too_big("event")
|
||||||
|
|
||||||
|
|
||||||
|
def _can_federate(event, auth_events):
|
||||||
|
creation_event = auth_events.get((EventTypes.Create, ""))
|
||||||
|
|
||||||
|
return creation_event.content.get("m.federate", True) is True
|
||||||
|
|
||||||
|
|
||||||
|
def _is_membership_change_allowed(event, auth_events):
|
||||||
|
membership = event.content["membership"]
|
||||||
|
|
||||||
|
# Check if this is the room creator joining:
|
||||||
|
if len(event.prev_events) == 1 and Membership.JOIN == membership:
|
||||||
|
# Get room creation event:
|
||||||
|
key = (EventTypes.Create, "", )
|
||||||
|
create = auth_events.get(key)
|
||||||
|
if create and event.prev_events[0][0] == create.event_id:
|
||||||
|
if create.content["creator"] == event.state_key:
|
||||||
|
return True
|
||||||
|
|
||||||
|
target_user_id = event.state_key
|
||||||
|
|
||||||
|
creating_domain = get_domain_from_id(event.room_id)
|
||||||
|
target_domain = get_domain_from_id(target_user_id)
|
||||||
|
if creating_domain != target_domain:
|
||||||
|
if not _can_federate(event, auth_events):
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"This room has been marked as unfederatable."
|
||||||
|
)
|
||||||
|
|
||||||
|
# get info about the caller
|
||||||
|
key = (EventTypes.Member, event.user_id, )
|
||||||
|
caller = auth_events.get(key)
|
||||||
|
|
||||||
|
caller_in_room = caller and caller.membership == Membership.JOIN
|
||||||
|
caller_invited = caller and caller.membership == Membership.INVITE
|
||||||
|
|
||||||
|
# get info about the target
|
||||||
|
key = (EventTypes.Member, target_user_id, )
|
||||||
|
target = auth_events.get(key)
|
||||||
|
|
||||||
|
target_in_room = target and target.membership == Membership.JOIN
|
||||||
|
target_banned = target and target.membership == Membership.BAN
|
||||||
|
|
||||||
|
key = (EventTypes.JoinRules, "", )
|
||||||
|
join_rule_event = auth_events.get(key)
|
||||||
|
if join_rule_event:
|
||||||
|
join_rule = join_rule_event.content.get(
|
||||||
|
"join_rule", JoinRules.INVITE
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
join_rule = JoinRules.INVITE
|
||||||
|
|
||||||
|
user_level = get_user_power_level(event.user_id, auth_events)
|
||||||
|
target_level = get_user_power_level(
|
||||||
|
target_user_id, auth_events
|
||||||
|
)
|
||||||
|
|
||||||
|
# FIXME (erikj): What should we do here as the default?
|
||||||
|
ban_level = _get_named_level(auth_events, "ban", 50)
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
"_is_membership_change_allowed: %s",
|
||||||
|
{
|
||||||
|
"caller_in_room": caller_in_room,
|
||||||
|
"caller_invited": caller_invited,
|
||||||
|
"target_banned": target_banned,
|
||||||
|
"target_in_room": target_in_room,
|
||||||
|
"membership": membership,
|
||||||
|
"join_rule": join_rule,
|
||||||
|
"target_user_id": target_user_id,
|
||||||
|
"event.user_id": event.user_id,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
if Membership.INVITE == membership and "third_party_invite" in event.content:
|
||||||
|
if not _verify_third_party_invite(event, auth_events):
|
||||||
|
raise AuthError(403, "You are not invited to this room.")
|
||||||
|
if target_banned:
|
||||||
|
raise AuthError(
|
||||||
|
403, "%s is banned from the room" % (target_user_id,)
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
if Membership.JOIN != membership:
|
||||||
|
if (caller_invited
|
||||||
|
and Membership.LEAVE == membership
|
||||||
|
and target_user_id == event.user_id):
|
||||||
|
return True
|
||||||
|
|
||||||
|
if not caller_in_room: # caller isn't joined
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"%s not in room %s." % (event.user_id, event.room_id,)
|
||||||
|
)
|
||||||
|
|
||||||
|
if Membership.INVITE == membership:
|
||||||
|
# TODO (erikj): We should probably handle this more intelligently
|
||||||
|
# PRIVATE join rules.
|
||||||
|
|
||||||
|
# Invites are valid iff caller is in the room and target isn't.
|
||||||
|
if target_banned:
|
||||||
|
raise AuthError(
|
||||||
|
403, "%s is banned from the room" % (target_user_id,)
|
||||||
|
)
|
||||||
|
elif target_in_room: # the target is already in the room.
|
||||||
|
raise AuthError(403, "%s is already in the room." %
|
||||||
|
target_user_id)
|
||||||
|
else:
|
||||||
|
invite_level = _get_named_level(auth_events, "invite", 0)
|
||||||
|
|
||||||
|
if user_level < invite_level:
|
||||||
|
raise AuthError(
|
||||||
|
403, "You cannot invite user %s." % target_user_id
|
||||||
|
)
|
||||||
|
elif Membership.JOIN == membership:
|
||||||
|
# Joins are valid iff caller == target and they were:
|
||||||
|
# invited: They are accepting the invitation
|
||||||
|
# joined: It's a NOOP
|
||||||
|
if event.user_id != target_user_id:
|
||||||
|
raise AuthError(403, "Cannot force another user to join.")
|
||||||
|
elif target_banned:
|
||||||
|
raise AuthError(403, "You are banned from this room")
|
||||||
|
elif join_rule == JoinRules.PUBLIC:
|
||||||
|
pass
|
||||||
|
elif join_rule == JoinRules.INVITE:
|
||||||
|
if not caller_in_room and not caller_invited:
|
||||||
|
raise AuthError(403, "You are not invited to this room.")
|
||||||
|
else:
|
||||||
|
# TODO (erikj): may_join list
|
||||||
|
# TODO (erikj): private rooms
|
||||||
|
raise AuthError(403, "You are not allowed to join this room")
|
||||||
|
elif Membership.LEAVE == membership:
|
||||||
|
# TODO (erikj): Implement kicks.
|
||||||
|
if target_banned and user_level < ban_level:
|
||||||
|
raise AuthError(
|
||||||
|
403, "You cannot unban user &s." % (target_user_id,)
|
||||||
|
)
|
||||||
|
elif target_user_id != event.user_id:
|
||||||
|
kick_level = _get_named_level(auth_events, "kick", 50)
|
||||||
|
|
||||||
|
if user_level < kick_level or user_level <= target_level:
|
||||||
|
raise AuthError(
|
||||||
|
403, "You cannot kick user %s." % target_user_id
|
||||||
|
)
|
||||||
|
elif Membership.BAN == membership:
|
||||||
|
if user_level < ban_level or user_level <= target_level:
|
||||||
|
raise AuthError(403, "You don't have permission to ban")
|
||||||
|
else:
|
||||||
|
raise AuthError(500, "Unknown membership %s" % membership)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _check_event_sender_in_room(event, auth_events):
|
||||||
|
key = (EventTypes.Member, event.user_id, )
|
||||||
|
member_event = auth_events.get(key)
|
||||||
|
|
||||||
|
return _check_joined_room(
|
||||||
|
member_event,
|
||||||
|
event.user_id,
|
||||||
|
event.room_id
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _check_joined_room(member, user_id, room_id):
|
||||||
|
if not member or member.membership != Membership.JOIN:
|
||||||
|
raise AuthError(403, "User %s not in room %s (%s)" % (
|
||||||
|
user_id, room_id, repr(member)
|
||||||
|
))
|
||||||
|
|
||||||
|
|
||||||
|
def get_send_level(etype, state_key, auth_events):
|
||||||
|
key = (EventTypes.PowerLevels, "", )
|
||||||
|
send_level_event = auth_events.get(key)
|
||||||
|
send_level = None
|
||||||
|
if send_level_event:
|
||||||
|
send_level = send_level_event.content.get("events", {}).get(
|
||||||
|
etype
|
||||||
|
)
|
||||||
|
if send_level is None:
|
||||||
|
if state_key is not None:
|
||||||
|
send_level = send_level_event.content.get(
|
||||||
|
"state_default", 50
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
send_level = send_level_event.content.get(
|
||||||
|
"events_default", 0
|
||||||
|
)
|
||||||
|
|
||||||
|
if send_level:
|
||||||
|
send_level = int(send_level)
|
||||||
|
else:
|
||||||
|
send_level = 0
|
||||||
|
|
||||||
|
return send_level
|
||||||
|
|
||||||
|
|
||||||
|
def _can_send_event(event, auth_events):
|
||||||
|
send_level = get_send_level(
|
||||||
|
event.type, event.get("state_key", None), auth_events
|
||||||
|
)
|
||||||
|
user_level = get_user_power_level(event.user_id, auth_events)
|
||||||
|
|
||||||
|
if user_level < send_level:
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"You don't have permission to post that to the room. " +
|
||||||
|
"user_level (%d) < send_level (%d)" % (user_level, send_level)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Check state_key
|
||||||
|
if hasattr(event, "state_key"):
|
||||||
|
if event.state_key.startswith("@"):
|
||||||
|
if event.state_key != event.user_id:
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"You are not allowed to set others state"
|
||||||
|
)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def check_redaction(event, auth_events):
|
||||||
|
"""Check whether the event sender is allowed to redact the target event.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if the the sender is allowed to redact the target event if the
|
||||||
|
target event was created by them.
|
||||||
|
False if the sender is allowed to redact the target event with no
|
||||||
|
further checks.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
AuthError if the event sender is definitely not allowed to redact
|
||||||
|
the target event.
|
||||||
|
"""
|
||||||
|
user_level = get_user_power_level(event.user_id, auth_events)
|
||||||
|
|
||||||
|
redact_level = _get_named_level(auth_events, "redact", 50)
|
||||||
|
|
||||||
|
if user_level >= redact_level:
|
||||||
|
return False
|
||||||
|
|
||||||
|
redacter_domain = get_domain_from_id(event.event_id)
|
||||||
|
redactee_domain = get_domain_from_id(event.redacts)
|
||||||
|
if redacter_domain == redactee_domain:
|
||||||
|
return True
|
||||||
|
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"You don't have permission to redact events"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _check_power_levels(event, auth_events):
|
||||||
|
user_list = event.content.get("users", {})
|
||||||
|
# Validate users
|
||||||
|
for k, v in user_list.items():
|
||||||
|
try:
|
||||||
|
UserID.from_string(k)
|
||||||
|
except Exception:
|
||||||
|
raise SynapseError(400, "Not a valid user_id: %s" % (k,))
|
||||||
|
|
||||||
|
try:
|
||||||
|
int(v)
|
||||||
|
except Exception:
|
||||||
|
raise SynapseError(400, "Not a valid power level: %s" % (v,))
|
||||||
|
|
||||||
|
key = (event.type, event.state_key, )
|
||||||
|
current_state = auth_events.get(key)
|
||||||
|
|
||||||
|
if not current_state:
|
||||||
|
return
|
||||||
|
|
||||||
|
user_level = get_user_power_level(event.user_id, auth_events)
|
||||||
|
|
||||||
|
# Check other levels:
|
||||||
|
levels_to_check = [
|
||||||
|
("users_default", None),
|
||||||
|
("events_default", None),
|
||||||
|
("state_default", None),
|
||||||
|
("ban", None),
|
||||||
|
("redact", None),
|
||||||
|
("kick", None),
|
||||||
|
("invite", None),
|
||||||
|
]
|
||||||
|
|
||||||
|
old_list = current_state.content.get("users", {})
|
||||||
|
for user in set(old_list.keys() + user_list.keys()):
|
||||||
|
levels_to_check.append(
|
||||||
|
(user, "users")
|
||||||
|
)
|
||||||
|
|
||||||
|
old_list = current_state.content.get("events", {})
|
||||||
|
new_list = event.content.get("events", {})
|
||||||
|
for ev_id in set(old_list.keys() + new_list.keys()):
|
||||||
|
levels_to_check.append(
|
||||||
|
(ev_id, "events")
|
||||||
|
)
|
||||||
|
|
||||||
|
old_state = current_state.content
|
||||||
|
new_state = event.content
|
||||||
|
|
||||||
|
for level_to_check, dir in levels_to_check:
|
||||||
|
old_loc = old_state
|
||||||
|
new_loc = new_state
|
||||||
|
if dir:
|
||||||
|
old_loc = old_loc.get(dir, {})
|
||||||
|
new_loc = new_loc.get(dir, {})
|
||||||
|
|
||||||
|
if level_to_check in old_loc:
|
||||||
|
old_level = int(old_loc[level_to_check])
|
||||||
|
else:
|
||||||
|
old_level = None
|
||||||
|
|
||||||
|
if level_to_check in new_loc:
|
||||||
|
new_level = int(new_loc[level_to_check])
|
||||||
|
else:
|
||||||
|
new_level = None
|
||||||
|
|
||||||
|
if new_level is not None and old_level is not None:
|
||||||
|
if new_level == old_level:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if dir == "users" and level_to_check != event.user_id:
|
||||||
|
if old_level == user_level:
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"You don't have permission to remove ops level equal "
|
||||||
|
"to your own"
|
||||||
|
)
|
||||||
|
|
||||||
|
if old_level > user_level or new_level > user_level:
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"You don't have permission to add ops level greater "
|
||||||
|
"than your own"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_power_level_event(auth_events):
|
||||||
|
key = (EventTypes.PowerLevels, "", )
|
||||||
|
return auth_events.get(key)
|
||||||
|
|
||||||
|
|
||||||
|
def get_user_power_level(user_id, auth_events):
|
||||||
|
power_level_event = _get_power_level_event(auth_events)
|
||||||
|
|
||||||
|
if power_level_event:
|
||||||
|
level = power_level_event.content.get("users", {}).get(user_id)
|
||||||
|
if not level:
|
||||||
|
level = power_level_event.content.get("users_default", 0)
|
||||||
|
|
||||||
|
if level is None:
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
return int(level)
|
||||||
|
else:
|
||||||
|
key = (EventTypes.Create, "", )
|
||||||
|
create_event = auth_events.get(key)
|
||||||
|
if (create_event is not None and
|
||||||
|
create_event.content["creator"] == user_id):
|
||||||
|
return 100
|
||||||
|
else:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def _get_named_level(auth_events, name, default):
|
||||||
|
power_level_event = _get_power_level_event(auth_events)
|
||||||
|
|
||||||
|
if not power_level_event:
|
||||||
|
return default
|
||||||
|
|
||||||
|
level = power_level_event.content.get(name, None)
|
||||||
|
if level is not None:
|
||||||
|
return int(level)
|
||||||
|
else:
|
||||||
|
return default
|
||||||
|
|
||||||
|
|
||||||
|
def _verify_third_party_invite(event, auth_events):
|
||||||
|
"""
|
||||||
|
Validates that the invite event is authorized by a previous third-party invite.
|
||||||
|
|
||||||
|
Checks that the public key, and keyserver, match those in the third party invite,
|
||||||
|
and that the invite event has a signature issued using that public key.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
event: The m.room.member join event being validated.
|
||||||
|
auth_events: All relevant previous context events which may be used
|
||||||
|
for authorization decisions.
|
||||||
|
|
||||||
|
Return:
|
||||||
|
True if the event fulfills the expectations of a previous third party
|
||||||
|
invite event.
|
||||||
|
"""
|
||||||
|
if "third_party_invite" not in event.content:
|
||||||
|
return False
|
||||||
|
if "signed" not in event.content["third_party_invite"]:
|
||||||
|
return False
|
||||||
|
signed = event.content["third_party_invite"]["signed"]
|
||||||
|
for key in {"mxid", "token"}:
|
||||||
|
if key not in signed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
token = signed["token"]
|
||||||
|
|
||||||
|
invite_event = auth_events.get(
|
||||||
|
(EventTypes.ThirdPartyInvite, token,)
|
||||||
|
)
|
||||||
|
if not invite_event:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if invite_event.sender != event.sender:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if event.user_id != invite_event.user_id:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if signed["mxid"] != event.state_key:
|
||||||
|
return False
|
||||||
|
if signed["token"] != token:
|
||||||
|
return False
|
||||||
|
|
||||||
|
for public_key_object in get_public_keys(invite_event):
|
||||||
|
public_key = public_key_object["public_key"]
|
||||||
|
try:
|
||||||
|
for server, signature_block in signed["signatures"].items():
|
||||||
|
for key_name, encoded_signature in signature_block.items():
|
||||||
|
if not key_name.startswith("ed25519:"):
|
||||||
|
continue
|
||||||
|
verify_key = decode_verify_key_bytes(
|
||||||
|
key_name,
|
||||||
|
decode_base64(public_key)
|
||||||
|
)
|
||||||
|
verify_signed_json(signed, server, verify_key)
|
||||||
|
|
||||||
|
# We got the public key from the invite, so we know that the
|
||||||
|
# correct server signed the signed bundle.
|
||||||
|
# The caller is responsible for checking that the signing
|
||||||
|
# server has not revoked that public key.
|
||||||
|
return True
|
||||||
|
except (KeyError, SignatureVerifyException,):
|
||||||
|
continue
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def get_public_keys(invite_event):
|
||||||
|
public_keys = []
|
||||||
|
if "public_key" in invite_event.content:
|
||||||
|
o = {
|
||||||
|
"public_key": invite_event.content["public_key"],
|
||||||
|
}
|
||||||
|
if "key_validity_url" in invite_event.content:
|
||||||
|
o["key_validity_url"] = invite_event.content["key_validity_url"]
|
||||||
|
public_keys.append(o)
|
||||||
|
public_keys.extend(invite_event.content.get("public_keys", []))
|
||||||
|
return public_keys
|
||||||
|
|
||||||
|
|
||||||
|
def auth_types_for_event(event):
|
||||||
|
"""Given an event, return a list of (EventType, StateKey) that may be
|
||||||
|
needed to auth the event. The returned list may be a superset of what
|
||||||
|
would actually be required depending on the full state of the room.
|
||||||
|
|
||||||
|
Used to limit the number of events to fetch from the database to
|
||||||
|
actually auth the event.
|
||||||
|
"""
|
||||||
|
if event.type == EventTypes.Create:
|
||||||
|
return []
|
||||||
|
|
||||||
|
auth_types = []
|
||||||
|
|
||||||
|
auth_types.append((EventTypes.PowerLevels, "", ))
|
||||||
|
auth_types.append((EventTypes.Member, event.user_id, ))
|
||||||
|
auth_types.append((EventTypes.Create, "", ))
|
||||||
|
|
||||||
|
if event.type == EventTypes.Member:
|
||||||
|
membership = event.content["membership"]
|
||||||
|
if membership in [Membership.JOIN, Membership.INVITE]:
|
||||||
|
auth_types.append((EventTypes.JoinRules, "", ))
|
||||||
|
|
||||||
|
auth_types.append((EventTypes.Member, event.state_key, ))
|
||||||
|
|
||||||
|
if membership == Membership.INVITE:
|
||||||
|
if "third_party_invite" in event.content:
|
||||||
|
key = (
|
||||||
|
EventTypes.ThirdPartyInvite,
|
||||||
|
event.content["third_party_invite"]["signed"]["token"]
|
||||||
|
)
|
||||||
|
auth_types.append(key)
|
||||||
|
|
||||||
|
return auth_types
|
||||||
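The new synapse/event_auth.py module above exposes check(event, auth_events, do_sig_check=True, do_size_check=True), which raises AuthError (or SynapseError, e.g. for an unknown room) when an event fails the auth rules. A rough usage sketch, assuming a FrozenEvent and a state map keyed by (event type, state_key); the wrapper function and variable names are illustrative only:

    from synapse import event_auth
    from synapse.api.errors import AuthError

    def is_event_allowed(event, auth_event_map):
        # auth_event_map: dict of (event_type, state_key) -> FrozenEvent
        try:
            event_auth.check(event, auth_event_map, do_sig_check=False)
            return True
        except AuthError:
            return False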
@@ -36,6 +36,15 @@ class _EventInternalMetadata(object):
def is_invite_from_remote(self):
return getattr(self, "invite_from_remote", False)

+ def get_send_on_behalf_of(self):
+ """Whether this server should send the event on behalf of another server.
+ This is used by the federation "send_join" API to forward the initial join
+ event for a server in the room.
+
+ returns a str with the name of the server this event is sent on behalf of.
+ """
+ return getattr(self, "send_on_behalf_of", None)
+

def _event_dict_property(key):
def getter(self):
@@ -70,7 +79,6 @@ class EventBase(object):
auth_events = _event_dict_property("auth_events")
depth = _event_dict_property("depth")
content = _event_dict_property("content")
- event_id = _event_dict_property("event_id")
hashes = _event_dict_property("hashes")
origin = _event_dict_property("origin")
origin_server_ts = _event_dict_property("origin_server_ts")
@@ -79,8 +87,6 @@ class EventBase(object):
redacts = _event_dict_property("redacts")
room_id = _event_dict_property("room_id")
sender = _event_dict_property("sender")
- state_key = _event_dict_property("state_key")
- type = _event_dict_property("type")
user_id = _event_dict_property("sender")

@property
@@ -99,7 +105,7 @@ class EventBase(object):

return d

- def get(self, key, default):
+ def get(self, key, default=None):
return self._event_dict.get(key, default)

def get_internal_metadata_dict(self):
@@ -153,6 +159,11 @@ class FrozenEvent(EventBase):
else:
frozen_dict = event_dict

+ self.event_id = event_dict["event_id"]
+ self.type = event_dict["type"]
+ if "state_key" in event_dict:
+ self.state_key = event_dict["state_key"]
+
super(FrozenEvent, self).__init__(
frozen_dict,
signatures=signatures,
|||||||
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

- from . import EventBase, FrozenEvent
+ from . import EventBase, FrozenEvent, _event_dict_property

from synapse.types import EventID

@@ -34,6 +34,10 @@ class EventBuilder(EventBase):
internal_metadata_dict=internal_metadata_dict,
)

+ event_id = _event_dict_property("event_id")
+ state_key = _event_dict_property("state_key")
+ type = _event_dict_property("type")
+
def build(self):
return FrozenEvent.from_event(self)

@@ -51,7 +55,7 @@ class EventBuilderFactory(object):

local_part = str(int(self.clock.time())) + i + random_string(5)

- e_id = EventID.create(local_part, self.hostname)
+ e_id = EventID(local_part, self.hostname)

return e_id.to_string()
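The switch from EventID.create(...) to the plain constructor relies on EventID being a simple (localpart, domain) pair in synapse.types. A small illustration of what the builder factory produces; the local part value here is made up (the real one is built from the clock, a counter and a random string):

    from synapse.types import EventID

    e_id = EventID("1507000000abcde", "example.com")
    # to_string() is expected to render the usual "$localpart:domain" form,
    # e.g. "$1507000000abcde:example.com"
    print(e_id.to_string())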
|||||||
@@ -15,9 +15,59 @@


class EventContext(object):
- def __init__(self, current_state=None):
- self.current_state = current_state
+ """
+ Attributes:
+ current_state_ids (dict[(str, str), str]):
+ The current state map including the current event.
+ (type, state_key) -> event_id
+
+ prev_state_ids (dict[(str, str), str]):
+ The current state map excluding the current event.
+ (type, state_key) -> event_id
+
+ state_group (int): state group id
+ rejected (bool|str): A rejection reason if the event was rejected, else
+ False
+
+ push_actions (list[(str, list[object])]): list of (user_id, actions)
+ tuples
+
+ prev_group (int): Previously persisted state group. ``None`` for an
+ outlier.
+ delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
+ (type, state_key) -> event_id. ``None`` for an outlier.
+
+ prev_state_events (?): XXX: is this ever set to anything other than
+ the empty list?
+ """
+
+ __slots__ = [
+ "current_state_ids",
+ "prev_state_ids",
+ "state_group",
+ "rejected",
+ "push_actions",
+ "prev_group",
+ "delta_ids",
+ "prev_state_events",
+ "app_service",
+ ]
+
+ def __init__(self):
+ # The current state including the current event
+ self.current_state_ids = None
+ # The current state excluding the current event
+ self.prev_state_ids = None
self.state_group = None

self.rejected = False
self.push_actions = []
+
+ # A previously persisted state group and a delta between that
+ # and this state.
+ self.prev_group = None
+ self.delta_ids = None
+
+ self.prev_state_events = None
+
+ self.app_service = None
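Purely as an illustration of how the new prev_group/delta_ids attributes are meant to be read (this helper does not exist in Synapse): the state at the event can be recovered by applying the delta map on top of the state of the previously persisted group.

    def apply_state_delta(prev_group_state, delta_ids):
        # prev_group_state, delta_ids: dict of (type, state_key) -> event_id
        current = dict(prev_group_state)
        current.update(delta_ids or {})
        return current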
|||||||
113
synapse/events/spamcheck.py
Normal file
113
synapse/events/spamcheck.py
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2017 New Vector Ltd.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
class SpamChecker(object):
|
||||||
|
def __init__(self, hs):
|
||||||
|
self.spam_checker = None
|
||||||
|
|
||||||
|
module = None
|
||||||
|
config = None
|
||||||
|
try:
|
||||||
|
module, config = hs.config.spam_checker
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if module is not None:
|
||||||
|
self.spam_checker = module(config=config)
|
||||||
|
|
||||||
|
def check_event_for_spam(self, event):
|
||||||
|
"""Checks if a given event is considered "spammy" by this server.
|
||||||
|
|
||||||
|
If the server considers an event spammy, then it will be rejected if
|
||||||
|
sent by a local user. If it is sent by a user on another server, then
|
||||||
|
users receive a blank event.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
event (synapse.events.EventBase): the event to be checked
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if the event is spammy.
|
||||||
|
"""
|
||||||
|
if self.spam_checker is None:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return self.spam_checker.check_event_for_spam(event)
|
||||||
|
|
||||||
|
def user_may_invite(self, inviter_userid, invitee_userid, room_id):
|
||||||
|
"""Checks if a given user may send an invite
|
||||||
|
|
||||||
|
If this method returns false, the invite will be rejected.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
userid (string): The sender's user ID
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if the user may send an invite, otherwise False
|
||||||
|
"""
|
||||||
|
if self.spam_checker is None:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return self.spam_checker.user_may_invite(inviter_userid, invitee_userid, room_id)
|
||||||
|
|
||||||
|
def user_may_create_room(self, userid):
|
||||||
|
"""Checks if a given user may create a room
|
||||||
|
|
||||||
|
If this method returns false, the creation request will be rejected.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
userid (string): The sender's user ID
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if the user may create a room, otherwise False
|
||||||
|
"""
|
||||||
|
if self.spam_checker is None:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return self.spam_checker.user_may_create_room(userid)
|
||||||
|
|
||||||
|
def user_may_create_room_alias(self, userid, room_alias):
|
||||||
|
"""Checks if a given user may create a room alias
|
||||||
|
|
||||||
|
If this method returns false, the association request will be rejected.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
userid (string): The sender's user ID
|
||||||
|
room_alias (string): The alias to be created
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if the user may create a room alias, otherwise False
|
||||||
|
"""
|
||||||
|
if self.spam_checker is None:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return self.spam_checker.user_may_create_room_alias(userid, room_alias)
|
||||||
|
|
||||||
|
def user_may_publish_room(self, userid, room_id):
|
||||||
|
"""Checks if a given user may publish a room to the directory
|
||||||
|
|
||||||
|
If this method returns false, the publish request will be rejected.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
userid (string): The sender's user ID
|
||||||
|
room_id (string): The ID of the room that would be published
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if the user may publish the room, otherwise False
|
||||||
|
"""
|
||||||
|
if self.spam_checker is None:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return self.spam_checker.user_may_publish_room(userid, room_id)
|
||||||
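The new SpamChecker wrapper above simply delegates to a module named in the homeserver config and constructed as module(config=config). A minimal sketch of such a module, assuming only the callback names the wrapper invokes (check_event_for_spam, user_may_invite, user_may_create_room, user_may_create_room_alias, user_may_publish_room); the "blocked_words" config key is made up for this example:

    class ExampleSpamChecker(object):
        def __init__(self, config):
            # Hypothetical config key: a list of words that mark an event as spam.
            self.blocked_words = config.get("blocked_words", [])

        def check_event_for_spam(self, event):
            # Returning True causes the event to be rejected or blanked out.
            body = event.content.get("body", "")
            return any(word in body for word in self.blocked_words)

        def user_may_invite(self, inviter_userid, invitee_userid, room_id):
            return True

        def user_may_create_room(self, userid):
            return True

        def user_may_create_room_alias(self, userid, room_alias):
            return True

        def user_may_publish_room(self, userid, room_id):
            return True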
@@ -16,6 +16,17 @@
from synapse.api.constants import EventTypes
from . import EventBase

+ from frozendict import frozendict
+
+ import re
+
+ # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
+ # (?<!stuff) matches if the current position in the string is not preceded
+ # by a match for 'stuff'.
+ # TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as
+ # the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
+ SPLIT_FIELD_REGEX = re.compile(r'(?<!\\)\.')
+
+
def prune_event(event):
""" Returns a pruned version of the given event, which removes all keys we
@@ -88,6 +99,8 @@ def prune_event(event):

if "age_ts" in event.unsigned:
allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]
+ if "replaces_state" in event.unsigned:
+ allowed_fields["unsigned"]["replaces_state"] = event.unsigned["replaces_state"]

return type(event)(
allowed_fields,
@@ -95,6 +108,83 @@ def prune_event(event):
)

+
+ def _copy_field(src, dst, field):
+ """Copy the field in 'src' to 'dst'.
+
+ For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
+ then dst={"foo":{"bar":5}}.
+
+ Args:
+ src(dict): The dict to read from.
+ dst(dict): The dict to modify.
+ field(list<str>): List of keys to drill down to in 'src'.
+ """
+ if len(field) == 0:  # this should be impossible
+ return
+ if len(field) == 1:  # common case e.g. 'origin_server_ts'
+ if field[0] in src:
+ dst[field[0]] = src[field[0]]
+ return
+
+ # Else is a nested field e.g. 'content.body'
+ # Pop the last field as that's the key to move across and we need the
+ # parent dict in order to access the data. Drill down to the right dict.
+ key_to_move = field.pop(-1)
+ sub_dict = src
+ for sub_field in field:  # e.g. sub_field => "content"
+ if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
+ sub_dict = sub_dict[sub_field]
+ else:
+ return
+
+ if key_to_move not in sub_dict:
+ return
+
+ # Insert the key into the output dictionary, creating nested objects
+ # as required. We couldn't do this any earlier or else we'd need to delete
+ # the empty objects if the key didn't exist.
+ sub_out_dict = dst
+ for sub_field in field:
+ sub_out_dict = sub_out_dict.setdefault(sub_field, {})
+ sub_out_dict[key_to_move] = sub_dict[key_to_move]
+
+
+ def only_fields(dictionary, fields):
+ """Return a new dict with only the fields in 'dictionary' which are present
+ in 'fields'.
+
+ If there are no event fields specified then all fields are included.
+ The entries may include '.' charaters to indicate sub-fields.
+ So ['content.body'] will include the 'body' field of the 'content' object.
+ A literal '.' character in a field name may be escaped using a '\'.
+
+ Args:
+ dictionary(dict): The dictionary to read from.
+ fields(list<str>): A list of fields to copy over. Only shallow refs are
+ taken.
+ Returns:
+ dict: A new dictionary with only the given fields. If fields was empty,
+ the same dictionary is returned.
+ """
+ if len(fields) == 0:
+ return dictionary
+
+ # for each field, convert it:
+ # ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
+ split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]
+
+ # for each element of the output array of arrays:
+ # remove escaping so we can use the right key names.
+ split_fields[:] = [
+ [f.replace(r'\.', r'.') for f in field_array] for field_array in split_fields
+ ]
+
+ output = {}
+ for field_array in split_fields:
+ _copy_field(dictionary, output, field_array)
+ return output
+
+
def format_event_raw(d):
return d

@@ -135,7 +225,22 @@ def format_event_for_client_v2_without_room_id(d):

def serialize_event(e, time_now_ms, as_client_event=True,
event_format=format_event_for_client_v1,
- token_id=None):
+ token_id=None, only_event_fields=None, is_invite=False):
+ """Serialize event for clients
+
+ Args:
+ e (EventBase)
+ time_now_ms (int)
+ as_client_event (bool)
+ event_format
+ token_id
+ only_event_fields
+ is_invite (bool): Whether this is an invite that is being sent to the
+ invitee
+
+ Returns:
+ dict
+ """
# FIXME(erikj): To handle the case of presence events and the like
if not isinstance(e, EventBase):
return e
@@ -161,7 +266,19 @@ def serialize_event(e, time_now_ms, as_client_event=True,
if txn_id is not None:
d["unsigned"]["transaction_id"] = txn_id

+ # If this is an invite for somebody else, then we don't care about the
+ # invite_room_state as that's meant solely for the invitee. Other clients
+ # will already have the state since they're in the room.
+ if not is_invite:
+ d["unsigned"].pop("invite_room_state", None)
+
if as_client_event:
- return event_format(d)
+ d = event_format(d)
- else:
+
+ if only_event_fields:
+ if (not isinstance(only_event_fields, list) or
+ not all(isinstance(f, basestring) for f in only_event_fields)):
+ raise TypeError("only_event_fields must be a list of strings")
+ d = only_fields(d, only_event_fields)
+
return d
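A quick illustration of what the new only_fields helper does with dotted field names; the event dictionary here is invented for the example:

    event_dict = {
        "type": "m.room.message",
        "content": {"body": "hello", "msgtype": "m.text"},
        "sender": "@alice:example.com",
    }
    filtered = only_fields(event_dict, ["type", "content.body"])
    # filtered == {"type": "m.room.message", "content": {"body": "hello"}}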
|||||||
@@ -17,10 +17,9 @@
"""

from .replication import ReplicationLayer
- from .transport.client import TransportLayerClient


- def initialize_http_replication(homeserver):
+ def initialize_http_replication(hs):
- transport = TransportLayerClient(homeserver)
+ transport = hs.get_federation_transport_client()

- return ReplicationLayer(homeserver, transport)
+ return ReplicationLayer(hs, transport)
|||||||
@@ -12,27 +12,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- from twisted.internet import defer
-
- from synapse.events.utils import prune_event
-
- from synapse.crypto.event_signing import check_event_content_hash
-
- from synapse.api.errors import SynapseError
-
- from synapse.util import unwrapFirstError
-
import logging

+ from synapse.api.errors import SynapseError
+ from synapse.crypto.event_signing import check_event_content_hash
+ from synapse.events.utils import prune_event
+ from synapse.util import unwrapFirstError, logcontext
+ from twisted.internet import defer
+
logger = logging.getLogger(__name__)


class FederationBase(object):
def __init__(self, hs):
- pass
+ self.spam_checker = hs.get_spam_checker()

@defer.inlineCallbacks
def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
@@ -56,55 +49,51 @@ class FederationBase(object):
"""
deferreds = self._check_sigs_and_hashes(pdus)

- def callback(pdu):
- return pdu
+ @defer.inlineCallbacks
+ def handle_check_result(pdu, deferred):
+ try:
+ res = yield logcontext.make_deferred_yieldable(deferred)
+ except SynapseError:
+ res = None

- def errback(failure, pdu):
- failure.trap(SynapseError)
- return None
-
- def try_local_db(res, pdu):
if not res:
# Check local db.
- return self.store.get_event(
+ res = yield self.store.get_event(
pdu.event_id,
allow_rejected=True,
allow_none=True,
)
- return res

- def try_remote(res, pdu):
if not res and pdu.origin != origin:
- return self.get_pdu(
+ try:
+ res = yield self.get_pdu(
destinations=[pdu.origin],
event_id=pdu.event_id,
outlier=outlier,
timeout=10000,
- ).addErrback(lambda e: None)
- return res
+ )
+ except SynapseError:
+ pass

- def warn(res, pdu):
if not res:
logger.warn(
"Failed to find copy of %s with valid signature",
pdu.event_id,
)
- return res

- for pdu, deferred in zip(pdus, deferreds):
- deferred.addCallbacks(
- callback, errback, errbackArgs=[pdu]
- ).addCallback(
- try_local_db, pdu
- ).addCallback(
- try_remote, pdu
- ).addCallback(
- warn, pdu
+ defer.returnValue(res)
+
+ handle = logcontext.preserve_fn(handle_check_result)
+ deferreds2 = [
+ handle(pdu, deferred)
+ for pdu, deferred in zip(pdus, deferreds)
+ ]
+
+ valid_pdus = yield logcontext.make_deferred_yieldable(
+ defer.gatherResults(
+ deferreds2,
+ consumeErrors=True,
+ )
)

- valid_pdus = yield defer.gatherResults(
- deferreds,
- consumeErrors=True
).addErrback(unwrapFirstError)

if include_none:
@@ -113,15 +102,24 @@ class FederationBase(object):
defer.returnValue([p for p in valid_pdus if p])

def _check_sigs_and_hash(self, pdu):
- return self._check_sigs_and_hashes([pdu])[0]
+ return logcontext.make_deferred_yieldable(
+ self._check_sigs_and_hashes([pdu])[0],
+ )

def _check_sigs_and_hashes(self, pdus):
- """Throws a SynapseError if a PDU does not have the correct
- signatures.
+ """Checks that each of the received events is correctly signed by the
+ sending server.
+
+ Args:
+ pdus (list[FrozenEvent]): the events to be checked
+
Returns:
- FrozenEvent: Either the given event or it redacted if it failed the
- content hash check.
+ list[Deferred]: for each input event, a deferred which:
+ * returns the original event if the checks pass
+ * returns a redacted version of the event (if the signature
+ matched but the hash did not)
+ * throws a SynapseError if the signature check failed.
+ The deferreds run their callbacks in the sentinel logcontext.
"""

redacted_pdus = [
@@ -134,17 +132,29 @@ class FederationBase(object):
for p in redacted_pdus
])

+ ctx = logcontext.LoggingContext.current_context()
+
def callback(_, pdu, redacted):
+ with logcontext.PreserveLoggingContext(ctx):
if not check_event_content_hash(pdu):
logger.warn(
"Event content has been tampered, redacting %s: %s",
pdu.event_id, pdu.get_pdu_json()
)
return redacted

+ if self.spam_checker.check_event_for_spam(pdu):
+ logger.warn(
+ "Event contains spam, redacting %s: %s",
+ pdu.event_id, pdu.get_pdu_json()
+ )
+ return redacted
+
return pdu

def errback(failure, pdu):
failure.trap(SynapseError)
+ with logcontext.PreserveLoggingContext(ctx):
logger.warn(
"Signature check failed for %s",
pdu.event_id,
|||||||
@@ -18,19 +18,18 @@ from twisted.internet import defer

 from .federation_base import FederationBase
 from synapse.api.constants import Membership
-from .units import Edu

 from synapse.api.errors import (
     CodeMessageException, HttpResponseException, SynapseError,
 )
-from synapse.util import unwrapFirstError
-from synapse.util.async import concurrently_execute
+from synapse.util import unwrapFirstError, logcontext
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.logutils import log_function
-from synapse.events import FrozenEvent
+from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
+from synapse.events import FrozenEvent, builder
 import synapse.metrics

-from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
+from synapse.util.retryutils import NotRetryingDestination

 import copy
 import itertools
@@ -44,17 +43,38 @@ logger = logging.getLogger(__name__)
 # synapse.federation.federation_client is a silly name
 metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

-sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")
-
-sent_edus_counter = metrics.register_counter("sent_edus")
-
 sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


+PDU_RETRY_TIME_MS = 1 * 60 * 1000
+
+
 class FederationClient(FederationBase):
     def __init__(self, hs):
         super(FederationClient, self).__init__(hs)

+        self.pdu_destination_tried = {}
+        self._clock.looping_call(
+            self._clear_tried_cache, 60 * 1000,
+        )
+        self.state = hs.get_state_handler()
+
+    def _clear_tried_cache(self):
+        """Clear pdu_destination_tried cache"""
+        now = self._clock.time_msec()
+
+        old_dict = self.pdu_destination_tried
+        self.pdu_destination_tried = {}
+
+        for event_id, destination_dict in old_dict.items():
+            destination_dict = {
+                dest: time
+                for dest, time in destination_dict.items()
+                if time + PDU_RETRY_TIME_MS > now
+            }
+            if destination_dict:
+                self.pdu_destination_tried[event_id] = destination_dict
+
     def start_get_pdu_cache(self):
         self._get_pdu_cache = ExpiringCache(
             cache_name="get_pdu_cache",
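The _clear_tried_cache method added above simply drops retry records older than PDU_RETRY_TIME_MS. A standalone sketch of the same pruning, with made-up event and server names for illustration:

    PDU_RETRY_TIME_MS = 1 * 60 * 1000  # same constant as introduced above

    def prune_tried_cache(pdu_destination_tried, now_ms):
        # Keep only (event_id -> destination -> last_attempt_ms) entries whose
        # last attempt is still within the retry window.
        pruned = {}
        for event_id, destination_dict in pdu_destination_tried.items():
            live = {
                dest: ts
                for dest, ts in destination_dict.items()
                if ts + PDU_RETRY_TIME_MS > now_ms
            }
            if live:
                pruned[event_id] = live
        return pruned

    # Hypothetical values: the attempt 30s ago survives, the stale one is dropped.
    cache = {"$event:a": {"hs1.example": 70000, "hs2.example": 0}}
    print(prune_tried_cache(cache, now_ms=100000))  # {'$event:a': {'hs1.example': 70000}}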
@@ -66,58 +86,9 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
self._get_pdu_cache.start()
|
self._get_pdu_cache.start()
|
||||||
|
|
||||||
@log_function
|
|
||||||
def send_pdu(self, pdu, destinations):
|
|
||||||
"""Informs the replication layer about a new PDU generated within the
|
|
||||||
home server that should be transmitted to others.
|
|
||||||
|
|
||||||
TODO: Figure out when we should actually resolve the deferred.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
pdu (Pdu): The new Pdu.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred: Completes when we have successfully processed the PDU
|
|
||||||
and replicated it to any interested remote home servers.
|
|
||||||
"""
|
|
||||||
order = self._order
|
|
||||||
self._order += 1
|
|
||||||
|
|
||||||
sent_pdus_destination_dist.inc_by(len(destinations))
|
|
||||||
|
|
||||||
logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)
|
|
||||||
|
|
||||||
# TODO, add errback, etc.
|
|
||||||
self._transaction_queue.enqueue_pdu(pdu, destinations, order)
|
|
||||||
|
|
||||||
logger.debug(
|
|
||||||
"[%s] transaction_layer.enqueue_pdu... done",
|
|
||||||
pdu.event_id
|
|
||||||
)
|
|
||||||
|
|
||||||
@log_function
|
|
||||||
def send_edu(self, destination, edu_type, content):
|
|
||||||
edu = Edu(
|
|
||||||
origin=self.server_name,
|
|
||||||
destination=destination,
|
|
||||||
edu_type=edu_type,
|
|
||||||
content=content,
|
|
||||||
)
|
|
||||||
|
|
||||||
sent_edus_counter.inc()
|
|
||||||
|
|
||||||
# TODO, add errback, etc.
|
|
||||||
self._transaction_queue.enqueue_edu(edu)
|
|
||||||
return defer.succeed(None)
|
|
||||||
|
|
||||||
@log_function
|
|
||||||
def send_failure(self, failure, destination):
|
|
||||||
self._transaction_queue.enqueue_failure(failure, destination)
|
|
||||||
return defer.succeed(None)
|
|
||||||
|
|
||||||
@log_function
|
@log_function
|
||||||
def make_query(self, destination, query_type, args,
|
def make_query(self, destination, query_type, args,
|
||||||
retry_on_dns_fail=False):
|
retry_on_dns_fail=False, ignore_backoff=False):
|
||||||
"""Sends a federation Query to a remote homeserver of the given type
|
"""Sends a federation Query to a remote homeserver of the given type
|
||||||
and arguments.
|
and arguments.
|
||||||
|
|
||||||
@@ -127,6 +98,8 @@ class FederationClient(FederationBase):
|
|||||||
handler name used in register_query_handler().
|
handler name used in register_query_handler().
|
||||||
args (dict): Mapping of strings to strings containing the details
|
args (dict): Mapping of strings to strings containing the details
|
||||||
of the query request.
|
of the query request.
|
||||||
|
ignore_backoff (bool): true to ignore the historical backoff data
|
||||||
|
and try the request anyway.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
a Deferred which will eventually yield a JSON object from the
|
a Deferred which will eventually yield a JSON object from the
|
||||||
@@ -135,11 +108,12 @@ class FederationClient(FederationBase):
|
|||||||
sent_queries_counter.inc(query_type)
|
sent_queries_counter.inc(query_type)
|
||||||
|
|
||||||
return self.transport_layer.make_query(
|
return self.transport_layer.make_query(
|
||||||
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
|
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail,
|
||||||
|
ignore_backoff=ignore_backoff,
|
||||||
)
|
)
|
||||||
|
|
||||||
@log_function
|
@log_function
|
||||||
def query_client_keys(self, destination, content):
|
def query_client_keys(self, destination, content, timeout):
|
||||||
"""Query device keys for a device hosted on a remote server.
|
"""Query device keys for a device hosted on a remote server.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -151,10 +125,22 @@ class FederationClient(FederationBase):
|
|||||||
response
|
response
|
||||||
"""
|
"""
|
||||||
sent_queries_counter.inc("client_device_keys")
|
sent_queries_counter.inc("client_device_keys")
|
||||||
return self.transport_layer.query_client_keys(destination, content)
|
return self.transport_layer.query_client_keys(
|
||||||
|
destination, content, timeout
|
||||||
|
)
|
||||||
|
|
||||||
@log_function
|
@log_function
|
||||||
def claim_client_keys(self, destination, content):
|
def query_user_devices(self, destination, user_id, timeout=30000):
|
||||||
|
"""Query the device keys for a list of user ids hosted on a remote
|
||||||
|
server.
|
||||||
|
"""
|
||||||
|
sent_queries_counter.inc("user_devices")
|
||||||
|
return self.transport_layer.query_user_devices(
|
||||||
|
destination, user_id, timeout
|
||||||
|
)
|
||||||
|
|
||||||
|
@log_function
|
||||||
|
def claim_client_keys(self, destination, content, timeout):
|
||||||
"""Claims one-time keys for a device hosted on a remote server.
|
"""Claims one-time keys for a device hosted on a remote server.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -166,7 +152,9 @@ class FederationClient(FederationBase):
|
|||||||
response
|
response
|
||||||
"""
|
"""
|
||||||
sent_queries_counter.inc("client_one_time_keys")
|
sent_queries_counter.inc("client_one_time_keys")
|
||||||
return self.transport_layer.claim_client_keys(destination, content)
|
return self.transport_layer.claim_client_keys(
|
||||||
|
destination, content, timeout
|
||||||
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -201,10 +189,10 @@ class FederationClient(FederationBase):
         ]

         # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = yield defer.gatherResults(
+        pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             self._check_sigs_and_hashes(pdus),
             consumeErrors=True,
-        ).addErrback(unwrapFirstError)
+        ).addErrback(unwrapFirstError))

         defer.returnValue(pdus)

@@ -221,8 +209,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
Args:
|
Args:
|
||||||
destinations (list): Which home servers to query
|
destinations (list): Which home servers to query
|
||||||
pdu_origin (str): The home server that originally sent the pdu.
|
event_id (str): event to fetch
|
||||||
event_id (str)
|
|
||||||
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
|
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
|
||||||
it's from an arbitary point in the context as opposed to part
|
it's from an arbitary point in the context as opposed to part
|
||||||
of the current block of PDUs. Defaults to `False`
|
of the current block of PDUs. Defaults to `False`
|
||||||
@@ -236,20 +223,20 @@ class FederationClient(FederationBase):
|
|||||||
# TODO: Rate limit the number of times we try and get the same event.
|
# TODO: Rate limit the number of times we try and get the same event.
|
||||||
|
|
||||||
if self._get_pdu_cache:
|
if self._get_pdu_cache:
|
||||||
e = self._get_pdu_cache.get(event_id)
|
ev = self._get_pdu_cache.get(event_id)
|
||||||
if e:
|
if ev:
|
||||||
defer.returnValue(e)
|
defer.returnValue(ev)
|
||||||
|
|
||||||
pdu = None
|
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
|
||||||
|
|
||||||
|
signed_pdu = None
|
||||||
for destination in destinations:
|
for destination in destinations:
|
||||||
try:
|
now = self._clock.time_msec()
|
||||||
limiter = yield get_retry_limiter(
|
last_attempt = pdu_attempts.get(destination, 0)
|
||||||
destination,
|
if last_attempt + PDU_RETRY_TIME_MS > now:
|
||||||
self._clock,
|
continue
|
||||||
self.store,
|
|
||||||
)
|
|
||||||
|
|
||||||
with limiter:
|
try:
|
||||||
transaction_data = yield self.transport_layer.get_event(
|
transaction_data = yield self.transport_layer.get_event(
|
||||||
destination, event_id, timeout=timeout,
|
destination, event_id, timeout=timeout,
|
||||||
)
|
)
|
||||||
@@ -265,39 +252,33 @@ class FederationClient(FederationBase):
|
|||||||
pdu = pdu_list[0]
|
pdu = pdu_list[0]
|
||||||
|
|
||||||
# Check signatures are correct.
|
# Check signatures are correct.
|
||||||
pdu = yield self._check_sigs_and_hashes([pdu])[0]
|
signed_pdu = yield self._check_sigs_and_hash(pdu)
|
||||||
|
|
||||||
break
|
break
|
||||||
|
|
||||||
except SynapseError:
|
pdu_attempts[destination] = now
|
||||||
logger.info(
|
|
||||||
"Failed to get PDU %s from %s because %s",
|
|
||||||
event_id, destination, e,
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
except CodeMessageException as e:
|
|
||||||
if 400 <= e.code < 500:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
except SynapseError as e:
|
||||||
logger.info(
|
logger.info(
|
||||||
"Failed to get PDU %s from %s because %s",
|
"Failed to get PDU %s from %s because %s",
|
||||||
event_id, destination, e,
|
event_id, destination, e,
|
||||||
)
|
)
|
||||||
continue
|
|
||||||
except NotRetryingDestination as e:
|
except NotRetryingDestination as e:
|
||||||
logger.info(e.message)
|
logger.info(e.message)
|
||||||
continue
|
continue
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
pdu_attempts[destination] = now
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
"Failed to get PDU %s from %s because %s",
|
"Failed to get PDU %s from %s because %s",
|
||||||
event_id, destination, e,
|
event_id, destination, e,
|
||||||
)
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if self._get_pdu_cache is not None and pdu:
|
if self._get_pdu_cache is not None and signed_pdu:
|
||||||
self._get_pdu_cache[event_id] = pdu
|
self._get_pdu_cache[event_id] = signed_pdu
|
||||||
|
|
||||||
defer.returnValue(pdu)
|
defer.returnValue(signed_pdu)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -314,6 +295,42 @@ class FederationClient(FederationBase):
|
|||||||
Deferred: Results in a list of PDUs.
|
Deferred: Results in a list of PDUs.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
# First we try and ask for just the IDs, as thats far quicker if
|
||||||
|
# we have most of the state and auth_chain already.
|
||||||
|
# However, this may 404 if the other side has an old synapse.
|
||||||
|
result = yield self.transport_layer.get_room_state_ids(
|
||||||
|
destination, room_id, event_id=event_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
state_event_ids = result["pdu_ids"]
|
||||||
|
auth_event_ids = result.get("auth_chain_ids", [])
|
||||||
|
|
||||||
|
fetched_events, failed_to_fetch = yield self.get_events(
|
||||||
|
[destination], room_id, set(state_event_ids + auth_event_ids)
|
||||||
|
)
|
||||||
|
|
||||||
|
if failed_to_fetch:
|
||||||
|
logger.warn("Failed to get %r", failed_to_fetch)
|
||||||
|
|
||||||
|
event_map = {
|
||||||
|
ev.event_id: ev for ev in fetched_events
|
||||||
|
}
|
||||||
|
|
||||||
|
pdus = [event_map[e_id] for e_id in state_event_ids if e_id in event_map]
|
||||||
|
auth_chain = [
|
||||||
|
event_map[e_id] for e_id in auth_event_ids if e_id in event_map
|
||||||
|
]
|
||||||
|
|
||||||
|
auth_chain.sort(key=lambda e: e.depth)
|
||||||
|
|
||||||
|
defer.returnValue((pdus, auth_chain))
|
||||||
|
except HttpResponseException as e:
|
||||||
|
if e.code == 400 or e.code == 404:
|
||||||
|
logger.info("Failed to use get_room_state_ids API, falling back")
|
||||||
|
else:
|
||||||
|
raise e
|
||||||
|
|
||||||
result = yield self.transport_layer.get_room_state(
|
result = yield self.transport_layer.get_room_state(
|
||||||
destination, room_id, event_id=event_id,
|
destination, room_id, event_id=event_id,
|
||||||
)
|
)
|
||||||
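The block added above tries the lighter state-IDs query first and falls back to the full state request when the remote side is too old to support it. A hedged sketch of that fallback shape, assuming a transport object exposing the two methods shown in the diff and Synapse's HttpResponseException; it is not the actual implementation:

    from twisted.internet import defer
    from synapse.api.errors import HttpResponseException

    @defer.inlineCallbacks
    def get_state_ids_with_fallback(transport, destination, room_id, event_id):
        # `transport` stands in for the transport layer client used in the diff.
        try:
            result = yield transport.get_room_state_ids(
                destination, room_id, event_id=event_id,
            )
            defer.returnValue(
                (result["pdu_ids"], result.get("auth_chain_ids", []))
            )
        except HttpResponseException as e:
            if e.code not in (400, 404):
                raise
            # Old remote server: fall back to fetching the full state events.
            result = yield transport.get_room_state(
                destination, room_id, event_id=event_id,
            )
            defer.returnValue(
                (result["pdus"], result.get("auth_chain", []))
            )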
@@ -327,18 +344,95 @@ class FederationClient(FederationBase):
|
|||||||
for p in result.get("auth_chain", [])
|
for p in result.get("auth_chain", [])
|
||||||
]
|
]
|
||||||
|
|
||||||
|
seen_events = yield self.store.get_events([
|
||||||
|
ev.event_id for ev in itertools.chain(pdus, auth_chain)
|
||||||
|
])
|
||||||
|
|
||||||
signed_pdus = yield self._check_sigs_and_hash_and_fetch(
|
signed_pdus = yield self._check_sigs_and_hash_and_fetch(
|
||||||
destination, pdus, outlier=True
|
destination,
|
||||||
|
[p for p in pdus if p.event_id not in seen_events],
|
||||||
|
outlier=True
|
||||||
|
)
|
||||||
|
signed_pdus.extend(
|
||||||
|
seen_events[p.event_id] for p in pdus if p.event_id in seen_events
|
||||||
)
|
)
|
||||||
|
|
||||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||||
destination, auth_chain, outlier=True
|
destination,
|
||||||
|
[p for p in auth_chain if p.event_id not in seen_events],
|
||||||
|
outlier=True
|
||||||
|
)
|
||||||
|
signed_auth.extend(
|
||||||
|
seen_events[p.event_id] for p in auth_chain if p.event_id in seen_events
|
||||||
)
|
)
|
||||||
|
|
||||||
signed_auth.sort(key=lambda e: e.depth)
|
signed_auth.sort(key=lambda e: e.depth)
|
||||||
|
|
||||||
defer.returnValue((signed_pdus, signed_auth))
|
defer.returnValue((signed_pdus, signed_auth))
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def get_events(self, destinations, room_id, event_ids, return_local=True):
|
||||||
|
"""Fetch events from some remote destinations, checking if we already
|
||||||
|
have them.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destinations (list)
|
||||||
|
room_id (str)
|
||||||
|
event_ids (list)
|
||||||
|
return_local (bool): Whether to include events we already have in
|
||||||
|
the DB in the returned list of events
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred: A deferred resolving to a 2-tuple where the first is a list of
|
||||||
|
events and the second is a list of event ids that we failed to fetch.
|
||||||
|
"""
|
||||||
|
if return_local:
|
||||||
|
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
|
||||||
|
signed_events = seen_events.values()
|
||||||
|
else:
|
||||||
|
seen_events = yield self.store.have_events(event_ids)
|
||||||
|
signed_events = []
|
||||||
|
|
||||||
|
failed_to_fetch = set()
|
||||||
|
|
||||||
|
missing_events = set(event_ids)
|
||||||
|
for k in seen_events:
|
||||||
|
missing_events.discard(k)
|
||||||
|
|
||||||
|
if not missing_events:
|
||||||
|
defer.returnValue((signed_events, failed_to_fetch))
|
||||||
|
|
||||||
|
def random_server_list():
|
||||||
|
srvs = list(destinations)
|
||||||
|
random.shuffle(srvs)
|
||||||
|
return srvs
|
||||||
|
|
||||||
|
batch_size = 20
|
||||||
|
missing_events = list(missing_events)
|
||||||
|
for i in xrange(0, len(missing_events), batch_size):
|
||||||
|
batch = set(missing_events[i:i + batch_size])
|
||||||
|
|
||||||
|
deferreds = [
|
||||||
|
preserve_fn(self.get_pdu)(
|
||||||
|
destinations=random_server_list(),
|
||||||
|
event_id=e_id,
|
||||||
|
)
|
||||||
|
for e_id in batch
|
||||||
|
]
|
||||||
|
|
||||||
|
res = yield preserve_context_over_deferred(
|
||||||
|
defer.DeferredList(deferreds, consumeErrors=True)
|
||||||
|
)
|
||||||
|
for success, result in res:
|
||||||
|
if success and result:
|
||||||
|
signed_events.append(result)
|
||||||
|
batch.discard(result.event_id)
|
||||||
|
|
||||||
|
# We removed all events we successfully fetched from `batch`
|
||||||
|
failed_to_fetch.update(batch)
|
||||||
|
|
||||||
|
defer.returnValue((signed_events, failed_to_fetch))
|
||||||
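The new get_events helper above batches the missing event IDs, shuffles the candidate servers, and fires the lookups through a DeferredList so one failure does not sink the whole batch. A simplified sketch of that flow, with get_pdu treated as an opaque callable that resolves to an event or None:

    import random
    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_missing(get_pdu, destinations, event_ids, batch_size=20):
        # get_pdu(destinations=..., event_id=...) is assumed to return a
        # Deferred resolving to the event, or None if it could not be fetched.
        fetched = []
        failed = set()
        missing = list(event_ids)
        for i in range(0, len(missing), batch_size):
            batch = set(missing[i:i + batch_size])
            servers = list(destinations)
            random.shuffle(servers)
            deferreds = [
                get_pdu(destinations=servers, event_id=e_id) for e_id in batch
            ]
            results = yield defer.DeferredList(deferreds, consumeErrors=True)
            for success, result in results:
                if success and result:
                    fetched.append(result)
                    batch.discard(result.event_id)
            failed.update(batch)  # whatever is left in the batch was not fetched
        defer.returnValue((fetched, failed))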
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def get_event_auth(self, destination, room_id, event_id):
|
def get_event_auth(self, destination, room_id, event_id):
|
||||||
@@ -380,8 +474,13 @@ class FederationClient(FederationBase):
|
|||||||
content (object): Any additional data to put into the content field
|
content (object): Any additional data to put into the content field
|
||||||
of the event.
|
of the event.
|
||||||
Return:
|
Return:
|
||||||
A tuple of (origin (str), event (object)) where origin is the remote
|
Deferred: resolves to a tuple of (origin (str), event (object))
|
||||||
homeserver which generated the event.
|
where origin is the remote homeserver which generated the event.
|
||||||
|
|
||||||
|
Fails with a ``CodeMessageException`` if the chosen remote server
|
||||||
|
returns a 300/400 code.
|
||||||
|
|
||||||
|
Fails with a ``RuntimeError`` if no servers were reachable.
|
||||||
"""
|
"""
|
||||||
valid_memberships = {Membership.JOIN, Membership.LEAVE}
|
valid_memberships = {Membership.JOIN, Membership.LEAVE}
|
||||||
if membership not in valid_memberships:
|
if membership not in valid_memberships:
|
||||||
@@ -410,23 +509,51 @@ class FederationClient(FederationBase):
|
|||||||
if "prev_state" not in pdu_dict:
|
if "prev_state" not in pdu_dict:
|
||||||
pdu_dict["prev_state"] = []
|
pdu_dict["prev_state"] = []
|
||||||
|
|
||||||
|
ev = builder.EventBuilder(pdu_dict)
|
||||||
|
|
||||||
defer.returnValue(
|
defer.returnValue(
|
||||||
(destination, self.event_from_pdu_json(pdu_dict))
|
(destination, ev)
|
||||||
)
|
)
|
||||||
break
|
break
|
||||||
except CodeMessageException:
|
except CodeMessageException as e:
|
||||||
|
if not 500 <= e.code < 600:
|
||||||
raise
|
raise
|
||||||
|
else:
|
||||||
|
logger.warn(
|
||||||
|
"Failed to make_%s via %s: %s",
|
||||||
|
membership, destination, e.message
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warn(
|
logger.warn(
|
||||||
"Failed to make_%s via %s: %s",
|
"Failed to make_%s via %s: %s",
|
||||||
membership, destination, e.message
|
membership, destination, e.message
|
||||||
)
|
)
|
||||||
raise
|
|
||||||
|
|
||||||
raise RuntimeError("Failed to send to any server.")
|
raise RuntimeError("Failed to send to any server.")
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def send_join(self, destinations, pdu):
|
def send_join(self, destinations, pdu):
|
||||||
|
"""Sends a join event to one of a list of homeservers.
|
||||||
|
|
||||||
|
Doing so will cause the remote server to add the event to the graph,
|
||||||
|
and send the event out to the rest of the federation.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destinations (str): Candidate homeservers which are probably
|
||||||
|
participating in the room.
|
||||||
|
pdu (BaseEvent): event to be sent
|
||||||
|
|
||||||
|
Return:
|
||||||
|
Deferred: resolves to a dict with members ``origin`` (a string
|
||||||
|
giving the serer the event was sent to, ``state`` (?) and
|
||||||
|
``auth_chain``.
|
||||||
|
|
||||||
|
Fails with a ``CodeMessageException`` if the chosen remote server
|
||||||
|
returns a 300/400 code.
|
||||||
|
|
||||||
|
Fails with a ``RuntimeError`` if no servers were reachable.
|
||||||
|
"""
|
||||||
|
|
||||||
for destination in destinations:
|
for destination in destinations:
|
||||||
if destination == self.server_name:
|
if destination == self.server_name:
|
||||||
continue
|
continue
|
||||||
@@ -493,8 +620,14 @@ class FederationClient(FederationBase):
|
|||||||
"auth_chain": signed_auth,
|
"auth_chain": signed_auth,
|
||||||
"origin": destination,
|
"origin": destination,
|
||||||
})
|
})
|
||||||
except CodeMessageException:
|
except CodeMessageException as e:
|
||||||
|
if not 500 <= e.code < 600:
|
||||||
raise
|
raise
|
||||||
|
else:
|
||||||
|
logger.exception(
|
||||||
|
"Failed to send_join via %s: %s",
|
||||||
|
destination, e.message
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception(
|
logger.exception(
|
||||||
"Failed to send_join via %s: %s",
|
"Failed to send_join via %s: %s",
|
||||||
@@ -528,6 +661,26 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def send_leave(self, destinations, pdu):
|
def send_leave(self, destinations, pdu):
|
||||||
|
"""Sends a leave event to one of a list of homeservers.
|
||||||
|
|
||||||
|
Doing so will cause the remote server to add the event to the graph,
|
||||||
|
and send the event out to the rest of the federation.
|
||||||
|
|
||||||
|
This is mostly useful to reject received invites.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destinations (str): Candidate homeservers which are probably
|
||||||
|
participating in the room.
|
||||||
|
pdu (BaseEvent): event to be sent
|
||||||
|
|
||||||
|
Return:
|
||||||
|
Deferred: resolves to None.
|
||||||
|
|
||||||
|
Fails with a ``CodeMessageException`` if the chosen remote server
|
||||||
|
returns a non-200 code.
|
||||||
|
|
||||||
|
Fails with a ``RuntimeError`` if no servers were reachable.
|
||||||
|
"""
|
||||||
for destination in destinations:
|
for destination in destinations:
|
||||||
if destination == self.server_name:
|
if destination == self.server_name:
|
||||||
continue
|
continue
|
||||||
@@ -553,24 +706,17 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
raise RuntimeError("Failed to send to any server.")
|
raise RuntimeError("Failed to send to any server.")
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
def get_public_rooms(self, destination, limit=None, since_token=None,
|
||||||
def get_public_rooms(self, destinations):
|
search_filter=None, include_all_networks=False,
|
||||||
results_by_server = {}
|
third_party_instance_id=None):
|
||||||
|
if destination == self.server_name:
|
||||||
|
return
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
return self.transport_layer.get_public_rooms(
|
||||||
def _get_result(s):
|
destination, limit, since_token, search_filter,
|
||||||
if s == self.server_name:
|
include_all_networks=include_all_networks,
|
||||||
defer.returnValue()
|
third_party_instance_id=third_party_instance_id,
|
||||||
|
)
|
||||||
try:
|
|
||||||
result = yield self.transport_layer.get_public_rooms(s)
|
|
||||||
results_by_server[s] = result
|
|
||||||
except:
|
|
||||||
logger.exception("Error getting room list from server %r", s)
|
|
||||||
|
|
||||||
yield concurrently_execute(_get_result, destinations, 3)
|
|
||||||
|
|
||||||
defer.returnValue(results_by_server)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def query_auth(self, destination, room_id, event_id, local_auth):
|
def query_auth(self, destination, room_id, event_id, local_auth):
|
||||||
@@ -614,7 +760,7 @@ class FederationClient(FederationBase):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_missing_events(self, destination, room_id, earliest_events_ids,
|
def get_missing_events(self, destination, room_id, earliest_events_ids,
|
||||||
latest_events, limit, min_depth):
|
latest_events, limit, min_depth, timeout):
|
||||||
"""Tries to fetch events we are missing. This is called when we receive
|
"""Tries to fetch events we are missing. This is called when we receive
|
||||||
an event without having received all of its ancestors.
|
an event without having received all of its ancestors.
|
||||||
|
|
||||||
@@ -628,6 +774,7 @@ class FederationClient(FederationBase):
|
|||||||
have all previous events for.
|
have all previous events for.
|
||||||
limit (int): Maximum number of events to return.
|
limit (int): Maximum number of events to return.
|
||||||
min_depth (int): Minimum depth of events tor return.
|
min_depth (int): Minimum depth of events tor return.
|
||||||
|
timeout (int): Max time to wait in ms
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
content = yield self.transport_layer.get_missing_events(
|
content = yield self.transport_layer.get_missing_events(
|
||||||
@@ -637,6 +784,7 @@ class FederationClient(FederationBase):
|
|||||||
latest_events=[e.event_id for e in latest_events],
|
latest_events=[e.event_id for e in latest_events],
|
||||||
limit=limit,
|
limit=limit,
|
||||||
min_depth=min_depth,
|
min_depth=min_depth,
|
||||||
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
|
|
||||||
events = [
|
events = [
|
||||||
@@ -647,8 +795,6 @@ class FederationClient(FederationBase):
|
|||||||
signed_events = yield self._check_sigs_and_hash_and_fetch(
|
signed_events = yield self._check_sigs_and_hash_and_fetch(
|
||||||
destination, events, outlier=False
|
destination, events, outlier=False
|
||||||
)
|
)
|
||||||
|
|
||||||
have_gotten_all_from_destination = True
|
|
||||||
except HttpResponseException as e:
|
except HttpResponseException as e:
|
||||||
if not e.code == 400:
|
if not e.code == 400:
|
||||||
raise
|
raise
|
||||||
@@ -656,69 +802,6 @@ class FederationClient(FederationBase):
|
|||||||
# We are probably hitting an old server that doesn't support
|
# We are probably hitting an old server that doesn't support
|
||||||
# get_missing_events
|
# get_missing_events
|
||||||
signed_events = []
|
signed_events = []
|
||||||
have_gotten_all_from_destination = False
|
|
||||||
|
|
||||||
if len(signed_events) >= limit:
|
|
||||||
defer.returnValue(signed_events)
|
|
||||||
|
|
||||||
servers = yield self.store.get_joined_hosts_for_room(room_id)
|
|
||||||
|
|
||||||
servers = set(servers)
|
|
||||||
servers.discard(self.server_name)
|
|
||||||
|
|
||||||
failed_to_fetch = set()
|
|
||||||
|
|
||||||
while len(signed_events) < limit:
|
|
||||||
# Are we missing any?
|
|
||||||
|
|
||||||
seen_events = set(earliest_events_ids)
|
|
||||||
seen_events.update(e.event_id for e in signed_events if e)
|
|
||||||
|
|
||||||
missing_events = {}
|
|
||||||
for e in itertools.chain(latest_events, signed_events):
|
|
||||||
if e.depth > min_depth:
|
|
||||||
missing_events.update({
|
|
||||||
e_id: e.depth for e_id, _ in e.prev_events
|
|
||||||
if e_id not in seen_events
|
|
||||||
and e_id not in failed_to_fetch
|
|
||||||
})
|
|
||||||
|
|
||||||
if not missing_events:
|
|
||||||
break
|
|
||||||
|
|
||||||
have_seen = yield self.store.have_events(missing_events)
|
|
||||||
|
|
||||||
for k in have_seen:
|
|
||||||
missing_events.pop(k, None)
|
|
||||||
|
|
||||||
if not missing_events:
|
|
||||||
break
|
|
||||||
|
|
||||||
# Okay, we haven't gotten everything yet. Lets get them.
|
|
||||||
ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])
|
|
||||||
|
|
||||||
if have_gotten_all_from_destination:
|
|
||||||
servers.discard(destination)
|
|
||||||
|
|
||||||
def random_server_list():
|
|
||||||
srvs = list(servers)
|
|
||||||
random.shuffle(srvs)
|
|
||||||
return srvs
|
|
||||||
|
|
||||||
deferreds = [
|
|
||||||
self.get_pdu(
|
|
||||||
destinations=random_server_list(),
|
|
||||||
event_id=e_id,
|
|
||||||
)
|
|
||||||
for e_id, depth in ordered_missing[:limit - len(signed_events)]
|
|
||||||
]
|
|
||||||
|
|
||||||
res = yield defer.DeferredList(deferreds, consumeErrors=True)
|
|
||||||
for (result, val), (e_id, _) in zip(res, ordered_missing):
|
|
||||||
if result and val:
|
|
||||||
signed_events.append(val)
|
|
||||||
else:
|
|
||||||
failed_to_fetch.add(e_id)
|
|
||||||
|
|
||||||
defer.returnValue(signed_events)
|
defer.returnValue(signed_events)
|
||||||
|
|
||||||
|
|||||||
@@ -12,25 +12,28 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from .federation_base import FederationBase
|
from .federation_base import FederationBase
|
||||||
from .units import Transaction, Edu
|
from .units import Transaction, Edu
|
||||||
|
|
||||||
from synapse.util.async import Linearizer
|
from synapse.util import async
|
||||||
from synapse.util.logutils import log_function
|
from synapse.util.logutils import log_function
|
||||||
|
from synapse.util.caches.response_cache import ResponseCache
|
||||||
from synapse.events import FrozenEvent
|
from synapse.events import FrozenEvent
|
||||||
|
from synapse.types import get_domain_from_id
|
||||||
import synapse.metrics
|
import synapse.metrics
|
||||||
|
|
||||||
from synapse.api.errors import FederationError, SynapseError
|
from synapse.api.errors import AuthError, FederationError, SynapseError
|
||||||
|
|
||||||
from synapse.crypto.event_signing import compute_event_signature
|
from synapse.crypto.event_signing import compute_event_signature
|
||||||
|
|
||||||
import simplejson as json
|
import simplejson as json
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
# when processing incoming transactions, we try to handle multiple rooms in
|
||||||
|
# parallel, up to this limit.
|
||||||
|
TRANSACTION_CONCURRENCY_LIMIT = 10
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -48,8 +51,14 @@ class FederationServer(FederationBase):
     def __init__(self, hs):
         super(FederationServer, self).__init__(hs)

-        self._room_pdu_linearizer = Linearizer()
-        self._server_linearizer = Linearizer()
+        self.auth = hs.get_auth()
+
+        self._server_linearizer = async.Linearizer("fed_server")
+        self._transaction_linearizer = async.Linearizer("fed_txn_handler")
+
+        # We cache responses to state queries, as they take a while and often
+        # come in waves.
+        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)

     def set_handler(self, handler):
         """Sets the handler that the replication layer will use to communicate
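The ResponseCache added to __init__ above is used further down to deduplicate concurrent state requests for the same (room_id, event_id). A minimal stand-in showing the general idea; it only dedups in-flight requests and has no expiry, unlike the real class:

    from twisted.internet import defer

    class MiniResponseCache(object):
        """While a request for a given key is in flight, later callers share
        its Deferred instead of recomputing."""

        def __init__(self):
            self._pending = {}

        def get(self, key):
            # Returns the in-flight Deferred for this key, or None.
            return self._pending.get(key)

        def set(self, key, deferred):
            # Record the Deferred and forget it once it has fired.
            self._pending[key] = deferred

            def remove(result):
                self._pending.pop(key, None)
                return result

            deferred.addBoth(remove)
            return deferred

    # Example: two lookups of the same key share one Deferred.
    cache = MiniResponseCache()
    key = ("!room:x", "$event:y")
    d1 = cache.set(key, defer.Deferred())
    assert cache.get(key) is d1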
@@ -102,30 +111,46 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_incoming_transaction(self, transaction_data):
+        # keep this as early as possible to make the calculated origin ts as
+        # accurate as possible.
+        request_time = self._clock.time_msec()
+
         transaction = Transaction(**transaction_data)

-        received_pdus_counter.inc_by(len(transaction.pdus))
-
-        for p in transaction.pdus:
-            if "unsigned" in p:
-                unsigned = p["unsigned"]
-                if "age" in unsigned:
-                    p["age"] = unsigned["age"]
-            if "age" in p:
-                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
-                del p["age"]
-
-        pdu_list = [
-            self.event_from_pdu_json(p) for p in transaction.pdus
-        ]
+        if not transaction.transaction_id:
+            raise Exception("Transaction missing transaction_id")
+        if not transaction.origin:
+            raise Exception("Transaction missing origin")

         logger.debug("[%s] Got transaction", transaction.transaction_id)

+        # use a linearizer to ensure that we don't process the same transaction
+        # multiple times in parallel.
+        with (yield self._transaction_linearizer.queue(
+                (transaction.origin, transaction.transaction_id),
+        )):
+            result = yield self._handle_incoming_transaction(
+                transaction, request_time,
+            )
+
+        defer.returnValue(result)
+
+    @defer.inlineCallbacks
+    def _handle_incoming_transaction(self, transaction, request_time):
+        """ Process an incoming transaction and return the HTTP response
+
+        Args:
+            transaction (Transaction): incoming transaction
+            request_time (int): timestamp that the HTTP request arrived at
+
+        Returns:
+            Deferred[(int, object)]: http response code and body
+        """
         response = yield self.transaction_actions.have_responded(transaction)

         if response:
             logger.debug(
-                "[%s] We've already responed to this request",
+                "[%s] We've already responded to this request",
                 transaction.transaction_id
             )
             defer.returnValue(response)
@@ -133,18 +158,49 @@ class FederationServer(FederationBase):
|
|||||||
|
|
||||||
logger.debug("[%s] Transaction is new", transaction.transaction_id)
|
logger.debug("[%s] Transaction is new", transaction.transaction_id)
|
||||||
|
|
||||||
results = []
|
received_pdus_counter.inc_by(len(transaction.pdus))
|
||||||
|
|
||||||
for pdu in pdu_list:
|
pdus_by_room = {}
|
||||||
|
|
||||||
|
for p in transaction.pdus:
|
||||||
|
if "unsigned" in p:
|
||||||
|
unsigned = p["unsigned"]
|
||||||
|
if "age" in unsigned:
|
||||||
|
p["age"] = unsigned["age"]
|
||||||
|
if "age" in p:
|
||||||
|
p["age_ts"] = request_time - int(p["age"])
|
||||||
|
del p["age"]
|
||||||
|
|
||||||
|
event = self.event_from_pdu_json(p)
|
||||||
|
room_id = event.room_id
|
||||||
|
pdus_by_room.setdefault(room_id, []).append(event)
|
||||||
|
|
||||||
|
pdu_results = {}
|
||||||
|
|
||||||
|
# we can process different rooms in parallel (which is useful if they
|
||||||
|
# require callouts to other servers to fetch missing events), but
|
||||||
|
# impose a limit to avoid going too crazy with ram/cpu.
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def process_pdus_for_room(room_id):
|
||||||
|
logger.debug("Processing PDUs for %s", room_id)
|
||||||
|
for pdu in pdus_by_room[room_id]:
|
||||||
|
event_id = pdu.event_id
|
||||||
try:
|
try:
|
||||||
yield self._handle_new_pdu(transaction.origin, pdu)
|
yield self._handle_received_pdu(
|
||||||
results.append({})
|
transaction.origin, pdu
|
||||||
|
)
|
||||||
|
pdu_results[event_id] = {}
|
||||||
except FederationError as e:
|
except FederationError as e:
|
||||||
self.send_failure(e, transaction.origin)
|
logger.warn("Error handling PDU %s: %s", event_id, e)
|
||||||
results.append({"error": str(e)})
|
pdu_results[event_id] = {"error": str(e)}
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
results.append({"error": str(e)})
|
pdu_results[event_id] = {"error": str(e)}
|
||||||
logger.exception("Failed to handle PDU")
|
logger.exception("Failed to handle PDU %s", event_id)
|
||||||
|
|
||||||
|
yield async.concurrently_execute(
|
||||||
|
process_pdus_for_room, pdus_by_room.keys(),
|
||||||
|
TRANSACTION_CONCURRENCY_LIMIT,
|
||||||
|
)
|
||||||
|
|
||||||
if hasattr(transaction, "edus"):
|
if hasattr(transaction, "edus"):
|
||||||
for edu in (Edu(**x) for x in transaction.edus):
|
for edu in (Edu(**x) for x in transaction.edus):
|
||||||
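The hunk above fans incoming PDUs out by room and processes the rooms concurrently, capped at TRANSACTION_CONCURRENCY_LIMIT. A simplified stand-in for that concurrently_execute pattern, not Synapse's implementation, using a shared iterator and a fixed pool of workers:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def process_in_parallel(process_room, room_ids, limit):
        # All workers pull from one shared iterator, so at most `limit` rooms
        # are being processed at any given time.
        it = iter(room_ids)

        @defer.inlineCallbacks
        def worker():
            for room_id in it:
                yield process_room(room_id)

        yield defer.gatherResults(
            [worker() for _ in range(limit)],
            consumeErrors=True,
        )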
@@ -154,17 +210,16 @@ class FederationServer(FederationBase):
|
|||||||
edu.content
|
edu.content
|
||||||
)
|
)
|
||||||
|
|
||||||
for failure in getattr(transaction, "pdu_failures", []):
|
pdu_failures = getattr(transaction, "pdu_failures", [])
|
||||||
|
for failure in pdu_failures:
|
||||||
logger.info("Got failure %r", failure)
|
logger.info("Got failure %r", failure)
|
||||||
|
|
||||||
logger.debug("Returning: %s", str(results))
|
|
||||||
|
|
||||||
response = {
|
response = {
|
||||||
"pdus": dict(zip(
|
"pdus": pdu_results,
|
||||||
(p.event_id for p in pdu_list), results
|
|
||||||
)),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.debug("Returning: %s", str(response))
|
||||||
|
|
||||||
yield self.transaction_actions.set_response(
|
yield self.transaction_actions.set_response(
|
||||||
transaction,
|
transaction,
|
||||||
200, response
|
200, response
|
||||||
@@ -181,17 +236,55 @@ class FederationServer(FederationBase):
|
|||||||
except SynapseError as e:
|
except SynapseError as e:
|
||||||
logger.info("Failed to handle edu %r: %r", edu_type, e)
|
logger.info("Failed to handle edu %r: %r", edu_type, e)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception("Failed to handle edu %r", edu_type, e)
|
logger.exception("Failed to handle edu %r", edu_type)
|
||||||
else:
|
else:
|
||||||
logger.warn("Received EDU of type %s with no handler", edu_type)
|
logger.warn("Received EDU of type %s with no handler", edu_type)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def on_context_state_request(self, origin, room_id, event_id):
|
def on_context_state_request(self, origin, room_id, event_id):
|
||||||
|
if not event_id:
|
||||||
|
raise NotImplementedError("Specify an event")
|
||||||
|
|
||||||
|
in_room = yield self.auth.check_host_in_room(room_id, origin)
|
||||||
|
if not in_room:
|
||||||
|
raise AuthError(403, "Host not in room.")
|
||||||
|
|
||||||
|
result = self._state_resp_cache.get((room_id, event_id))
|
||||||
|
if not result:
|
||||||
with (yield self._server_linearizer.queue((origin, room_id))):
|
with (yield self._server_linearizer.queue((origin, room_id))):
|
||||||
if event_id:
|
resp = yield self._state_resp_cache.set(
|
||||||
|
(room_id, event_id),
|
||||||
|
self._on_context_state_request_compute(room_id, event_id)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
resp = yield result
|
||||||
|
|
||||||
|
defer.returnValue((200, resp))
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def on_state_ids_request(self, origin, room_id, event_id):
|
||||||
|
if not event_id:
|
||||||
|
raise NotImplementedError("Specify an event")
|
||||||
|
|
||||||
|
in_room = yield self.auth.check_host_in_room(room_id, origin)
|
||||||
|
if not in_room:
|
||||||
|
raise AuthError(403, "Host not in room.")
|
||||||
|
|
||||||
|
state_ids = yield self.handler.get_state_ids_for_pdu(
|
||||||
|
room_id, event_id,
|
||||||
|
)
|
||||||
|
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
|
||||||
|
|
||||||
|
defer.returnValue((200, {
|
||||||
|
"pdu_ids": state_ids,
|
||||||
|
"auth_chain_ids": auth_chain_ids,
|
||||||
|
}))
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _on_context_state_request_compute(self, room_id, event_id):
|
||||||
pdus = yield self.handler.get_state_for_pdu(
|
pdus = yield self.handler.get_state_for_pdu(
|
||||||
origin, room_id, event_id,
|
room_id, event_id,
|
||||||
)
|
)
|
||||||
auth_chain = yield self.store.get_auth_chain(
|
auth_chain = yield self.store.get_auth_chain(
|
||||||
[pdu.event_id for pdu in pdus]
|
[pdu.event_id for pdu in pdus]
|
||||||
@@ -208,13 +301,11 @@ class FederationServer(FederationBase):
|
|||||||
self.hs.config.signing_key[0]
|
self.hs.config.signing_key[0]
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
else:
|
|
||||||
raise NotImplementedError("Specify an event")
|
|
||||||
|
|
||||||
defer.returnValue((200, {
|
defer.returnValue({
|
||||||
"pdus": [pdu.get_pdu_json() for pdu in pdus],
|
"pdus": [pdu.get_pdu_json() for pdu in pdus],
|
||||||
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
|
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
|
||||||
}))
|
})
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -348,27 +439,12 @@ class FederationServer(FederationBase):
|
|||||||
(200, send_content)
|
(200, send_content)
|
||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
@log_function
|
@log_function
|
||||||
def on_query_client_keys(self, origin, content):
|
def on_query_client_keys(self, origin, content):
|
||||||
query = []
|
return self.on_query_request("client_keys", content)
|
||||||
for user_id, device_ids in content.get("device_keys", {}).items():
|
|
||||||
if not device_ids:
|
|
||||||
query.append((user_id, None))
|
|
||||||
else:
|
|
||||||
for device_id in device_ids:
|
|
||||||
query.append((user_id, device_id))
|
|
||||||
|
|
||||||
results = yield self.store.get_e2e_device_keys(query)
|
def on_query_user_devices(self, origin, user_id):
|
||||||
|
return self.on_query_request("user_devices", user_id)
|
||||||
json_result = {}
|
|
||||||
for user_id, device_keys in results.items():
|
|
||||||
for device_id, json_bytes in device_keys.items():
|
|
||||||
json_result.setdefault(user_id, {})[device_id] = json.loads(
|
|
||||||
json_bytes
|
|
||||||
)
|
|
||||||
|
|
||||||
defer.returnValue({"device_keys": json_result})
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
@@ -388,6 +464,16 @@ class FederationServer(FederationBase):
|
|||||||
key_id: json.loads(json_bytes)
|
key_id: json.loads(json_bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"Claimed one-time-keys: %s",
|
||||||
|
",".join((
|
||||||
|
"%s for %s:%s" % (key_id, user_id, device_id)
|
||||||
|
for user_id, user_keys in json_result.iteritems()
|
||||||
|
for device_id, device_keys in user_keys.iteritems()
|
||||||
|
for key_id, _ in device_keys.iteritems()
|
||||||
|
)),
|
||||||
|
)
|
||||||
|
|
||||||
defer.returnValue({"one_time_keys": json_result})
|
defer.returnValue({"one_time_keys": json_result})
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@@ -400,6 +486,7 @@ class FederationServer(FederationBase):
|
|||||||
" limit: %d, min_depth: %d",
|
" limit: %d, min_depth: %d",
|
||||||
earliest_events, latest_events, limit, min_depth
|
earliest_events, latest_events, limit, min_depth
|
||||||
)
|
)
|
||||||
|
|
||||||
missing_events = yield self.handler.on_get_missing_events(
|
missing_events = yield self.handler.on_get_missing_events(
|
||||||
origin, room_id, earliest_events, latest_events, limit, min_depth
|
origin, room_id, earliest_events, latest_events, limit, min_depth
|
||||||
)
|
)
|
||||||
@@ -447,25 +534,39 @@ class FederationServer(FederationBase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
def _handle_received_pdu(self, origin, pdu):
|
||||||
def _handle_new_pdu(self, origin, pdu, get_missing=True):
|
""" Process a PDU received in a federation /send/ transaction.
|
||||||
# We reprocess pdus when we have seen them only as outliers
|
|
||||||
existing = yield self._get_persisted_pdu(
|
|
||||||
origin, pdu.event_id, do_auth=False
|
|
||||||
)
|
|
||||||
|
|
||||||
# FIXME: Currently we fetch an event again when we already have it
|
Args:
|
||||||
# if it has been marked as an outlier.
|
origin (str): server which sent the pdu
|
||||||
|
pdu (FrozenEvent): received pdu
|
||||||
|
|
||||||
already_seen = (
|
Returns (Deferred): completes with None
|
||||||
existing and (
|
Raises: FederationError if the signatures / hash do not match
|
||||||
not existing.internal_metadata.is_outlier()
|
"""
|
||||||
or pdu.internal_metadata.is_outlier()
|
# check that it's actually being sent from a valid destination to
|
||||||
|
# workaround bug #1753 in 0.18.5 and 0.18.6
|
||||||
|
if origin != get_domain_from_id(pdu.event_id):
|
||||||
|
# We continue to accept join events from any server; this is
|
||||||
|
# necessary for the federation join dance to work correctly.
|
||||||
|
# (When we join over federation, the "helper" server is
|
||||||
|
# responsible for sending out the join event, rather than the
|
||||||
|
# origin. See bug #1893).
|
||||||
|
if not (
|
||||||
|
pdu.type == 'm.room.member' and
|
||||||
|
pdu.content and
|
||||||
|
pdu.content.get("membership", None) == 'join'
|
||||||
|
):
|
||||||
|
logger.info(
|
||||||
|
"Discarding PDU %s from invalid origin %s",
|
||||||
|
pdu.event_id, origin
|
||||||
)
|
)
|
||||||
)
|
|
||||||
if already_seen:
|
|
||||||
logger.debug("Already seen pdu %s", pdu.event_id)
|
|
||||||
return
|
return
|
||||||
|
else:
|
||||||
|
logger.info(
|
||||||
|
"Accepting join PDU %s from %s",
|
||||||
|
pdu.event_id, origin
|
||||||
|
)
|
||||||
|
|
||||||
# Check signature.
|
# Check signature.
|
||||||
try:
|
try:
|
||||||
@@ -478,114 +579,7 @@ class FederationServer(FederationBase):
|
|||||||
affected=pdu.event_id,
|
affected=pdu.event_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
state = None
|
yield self.handler.on_receive_pdu(origin, pdu, get_missing=True)
|
||||||
|
|
||||||
auth_chain = []
|
|
||||||
|
|
||||||
have_seen = yield self.store.have_events(
|
|
||||||
[ev for ev, _ in pdu.prev_events]
|
|
||||||
)
|
|
||||||
|
|
||||||
fetch_state = False
|
|
||||||
|
|
||||||
# Get missing pdus if necessary.
|
|
||||||
if not pdu.internal_metadata.is_outlier():
|
|
||||||
# We only backfill backwards to the min depth.
|
|
||||||
min_depth = yield self.handler.get_min_depth_for_context(
|
|
||||||
pdu.room_id
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.debug(
|
|
||||||
"_handle_new_pdu min_depth for %s: %d",
|
|
||||||
pdu.room_id, min_depth
|
|
||||||
)
|
|
||||||
|
|
||||||
prevs = {e_id for e_id, _ in pdu.prev_events}
|
|
||||||
seen = set(have_seen.keys())
|
|
||||||
|
|
||||||
if min_depth and pdu.depth < min_depth:
|
|
||||||
# This is so that we don't notify the user about this
|
|
||||||
# message, to work around the fact that some events will
|
|
||||||
# reference really really old events we really don't want to
|
|
||||||
# send to the clients.
|
|
||||||
pdu.internal_metadata.outlier = True
|
|
||||||
elif min_depth and pdu.depth > min_depth:
|
|
||||||
if get_missing and prevs - seen:
|
|
||||||
# If we're missing stuff, ensure we only fetch stuff one
|
|
||||||
# at a time.
|
|
||||||
with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
|
|
||||||
# We recalculate seen, since it may have changed.
|
|
||||||
have_seen = yield self.store.have_events(prevs)
|
|
||||||
seen = set(have_seen.keys())
|
|
||||||
|
|
||||||
if prevs - seen:
|
|
||||||
latest = yield self.store.get_latest_event_ids_in_room(
|
|
||||||
pdu.room_id
|
|
||||||
)
|
|
||||||
|
|
||||||
# We add the prev events that we have seen to the latest
|
|
||||||
# list to ensure the remote server doesn't give them to us
|
|
||||||
latest = set(latest)
|
|
||||||
latest |= seen
|
|
||||||
|
|
||||||
logger.info(
|
|
||||||
"Missing %d events for room %r: %r...",
|
|
||||||
len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
|
|
||||||
)
|
|
||||||
|
|
||||||
missing_events = yield self.get_missing_events(
|
|
||||||
origin,
|
|
||||||
pdu.room_id,
|
|
||||||
earliest_events_ids=list(latest),
|
|
||||||
latest_events=[pdu],
|
|
||||||
limit=10,
|
|
||||||
min_depth=min_depth,
|
|
||||||
)
|
|
||||||
|
|
||||||
# We want to sort these by depth so we process them and
|
|
||||||
# tell clients about them in order.
|
|
||||||
missing_events.sort(key=lambda x: x.depth)
|
|
||||||
|
|
||||||
for e in missing_events:
|
|
||||||
yield self._handle_new_pdu(
|
|
||||||
origin,
|
|
||||||
e,
|
|
||||||
get_missing=False
|
|
||||||
)
|
|
||||||
|
|
||||||
have_seen = yield self.store.have_events(
|
|
||||||
[ev for ev, _ in pdu.prev_events]
|
|
||||||
)
|
|
||||||
|
|
||||||
prevs = {e_id for e_id, _ in pdu.prev_events}
|
|
||||||
seen = set(have_seen.keys())
|
|
||||||
if prevs - seen:
|
|
||||||
logger.info(
|
|
||||||
"Still missing %d events for room %r: %r...",
|
|
||||||
len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
|
|
||||||
)
|
|
||||||
fetch_state = True
|
|
||||||
|
|
||||||
if fetch_state:
|
|
||||||
# We need to get the state at this event, since we haven't
|
|
||||||
# processed all the prev events.
|
|
||||||
logger.debug(
|
|
||||||
"_handle_new_pdu getting state for %s",
|
|
||||||
pdu.room_id
|
|
||||||
)
|
|
||||||
try:
|
|
||||||
state, auth_chain = yield self.get_state_for_room(
|
|
||||||
origin, pdu.room_id, pdu.event_id,
|
|
||||||
)
|
|
||||||
except:
|
|
||||||
logger.warn("Failed to get state for event: %s", pdu.event_id)
|
|
||||||
|
|
||||||
yield self.handler.on_receive_pdu(
|
|
||||||
origin,
|
|
||||||
pdu,
|
|
||||||
state=state,
|
|
||||||
auth_chain=auth_chain,
|
|
||||||
)
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return "<ReplicationLayer(%s)>" % self.server_name
|
return "<ReplicationLayer(%s)>" % self.server_name
|
||||||
|
|||||||
@@ -20,8 +20,6 @@ a given transport.
|
|||||||
from .federation_client import FederationClient
|
from .federation_client import FederationClient
|
||||||
from .federation_server import FederationServer
|
from .federation_server import FederationServer
|
||||||
|
|
||||||
from .transaction_queue import TransactionQueue
|
|
||||||
|
|
||||||
from .persistence import TransactionActions
|
from .persistence import TransactionActions
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
@@ -66,9 +64,6 @@ class ReplicationLayer(FederationClient, FederationServer):
|
|||||||
self._clock = hs.get_clock()
|
self._clock = hs.get_clock()
|
||||||
|
|
||||||
self.transaction_actions = TransactionActions(self.store)
|
self.transaction_actions = TransactionActions(self.store)
|
||||||
self._transaction_queue = TransactionQueue(hs, transport_layer)
|
|
||||||
|
|
||||||
self._order = 0
|
|
||||||
|
|
||||||
self.hs = hs
|
self.hs = hs
|
||||||
|
|
||||||
|
synapse/federation/send_queue.py (new file, 548 lines)
@@ -0,0 +1,548 @@
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2014-2016 OpenMarket Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""A federation sender that forwards things to be sent across replication to
|
||||||
|
a worker process.
|
||||||
|
|
||||||
|
It assumes there is a single worker process feeding off of it.
|
||||||
|
|
||||||
|
Each row in the replication stream consists of a type and some json, where the
|
||||||
|
types indicate whether they are presence, or edus, etc.
|
||||||
|
|
||||||
|
Ephemeral or non-event data are queued up in-memory. When the worker requests
|
||||||
|
updates since a particular point, all in-memory data since before that point is
|
||||||
|
dropped. We also expire things in the queue after 5 minutes, to ensure that a
|
||||||
|
dead worker doesn't cause the queues to grow limitlessly.
|
||||||
|
|
||||||
|
Events are replicated via a separate events stream.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .units import Edu
|
||||||
|
|
||||||
|
from synapse.storage.presence import UserPresenceState
|
||||||
|
from synapse.util.metrics import Measure
|
||||||
|
import synapse.metrics
|
||||||
|
|
||||||
|
from blist import sorteddict
|
||||||
|
from collections import namedtuple
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
metrics = synapse.metrics.get_metrics_for(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class FederationRemoteSendQueue(object):
|
||||||
|
"""A drop in replacement for TransactionQueue"""
|
||||||
|
|
||||||
|
def __init__(self, hs):
|
||||||
|
self.server_name = hs.hostname
|
||||||
|
self.clock = hs.get_clock()
|
||||||
|
self.notifier = hs.get_notifier()
|
||||||
|
self.is_mine_id = hs.is_mine_id
|
||||||
|
|
||||||
|
self.presence_map = {} # Pending presence map user_id -> UserPresenceState
|
||||||
|
self.presence_changed = sorteddict() # Stream position -> user_id
|
||||||
|
|
||||||
|
self.keyed_edu = {} # (destination, key) -> EDU
|
||||||
|
self.keyed_edu_changed = sorteddict() # stream position -> (destination, key)
|
||||||
|
|
||||||
|
self.edus = sorteddict() # stream position -> Edu
|
||||||
|
|
||||||
|
self.failures = sorteddict() # stream position -> (destination, Failure)
|
||||||
|
|
||||||
|
self.device_messages = sorteddict() # stream position -> destination
|
||||||
|
|
||||||
|
self.pos = 1
|
||||||
|
self.pos_time = sorteddict()
|
||||||
|
|
||||||
|
# EVERYTHING IS SAD. In particular, python only makes new scopes when
|
||||||
|
# we make a new function, so we need to make a new function so the inner
|
||||||
|
# lambda binds to the queue rather than to the name of the queue which
|
||||||
|
# changes. ARGH.
|
||||||
|
def register(name, queue):
|
||||||
|
metrics.register_callback(
|
||||||
|
queue_name + "_size",
|
||||||
|
lambda: len(queue),
|
||||||
|
)
|
||||||
|
|
||||||
|
for queue_name in [
|
||||||
|
"presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
|
||||||
|
"edus", "failures", "device_messages", "pos_time",
|
||||||
|
]:
|
||||||
|
register(queue_name, getattr(self, queue_name))
|
||||||
|
|
||||||
|
self.clock.looping_call(self._clear_queue, 30 * 1000)
|
||||||
|
|
||||||
|

    def _next_pos(self):
        pos = self.pos
        self.pos += 1
        self.pos_time[self.clock.time_msec()] = pos
        return pos

    def _clear_queue(self):
        """Clear the queues for anything older than N minutes"""

        FIVE_MINUTES_AGO = 5 * 60 * 1000
        now = self.clock.time_msec()

        keys = self.pos_time.keys()
        time = keys.bisect_left(now - FIVE_MINUTES_AGO)
        if not keys[:time]:
            return

        position_to_delete = max(keys[:time])
        for key in keys[:time]:
            del self.pos_time[key]

        self._clear_queue_before_pos(position_to_delete)

    def _clear_queue_before_pos(self, position_to_delete):
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
            keys = self.presence_changed.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_changed[key]

            user_ids = set(
                user_id
                for uids in self.presence_changed.itervalues()
                for user_id in uids
            )

            to_del = [
                user_id for user_id in self.presence_map if user_id not in user_ids
            ]
            for user_id in to_del:
                del self.presence_map[user_id]

            # Delete things out of keyed edus
            keys = self.keyed_edu_changed.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.keyed_edu_changed[key]

            live_keys = set()
            for edu_key in self.keyed_edu_changed.values():
                live_keys.add(edu_key)

            to_del = [edu_key for edu_key in self.keyed_edu if edu_key not in live_keys]
            for edu_key in to_del:
                del self.keyed_edu[edu_key]

            # Delete things out of edu map
            keys = self.edus.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.edus[key]

            # Delete things out of failure map
            keys = self.failures.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.failures[key]

            # Delete things out of device map
            keys = self.device_messages.keys()
            i = keys.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.device_messages[key]
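
    # Illustrative aside, not part of the original class: pos_time is a
    # sorteddict keyed by the time each stream position was handed out, so
    # bisect_left gives the number of entries older than the cutoff. With
    # made-up values and a cutoff of now - 5 minutes = 1000:
    #
    #   keys = pos_time.keys()           # e.g. [900, 950, 1200]
    #   i = keys.bisect_left(1000)       # 2 - the first two entries have expired
    #   keys[:i]                         # [900, 950] - dropped by _clear_queue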

    def notify_new_events(self, current_id):
        """As per TransactionQueue"""
        # We don't need to replicate this as it gets sent down a different
        # stream.
        pass

    def send_edu(self, destination, edu_type, content, key=None):
        """As per TransactionQueue"""
        pos = self._next_pos()

        edu = Edu(
            origin=self.server_name,
            destination=destination,
            edu_type=edu_type,
            content=content,
        )

        if key:
            assert isinstance(key, tuple)
            self.keyed_edu[(destination, key)] = edu
            self.keyed_edu_changed[pos] = (destination, key)
        else:
            self.edus[pos] = edu

        self.notifier.on_new_replication_data()

    def send_presence(self, states):
        """As per TransactionQueue

        Args:
            states (list(UserPresenceState))
        """
        pos = self._next_pos()

        # We only want to send presence for our own users, so let's always
        # filter here, just in case.
        local_states = filter(lambda s: self.is_mine_id(s.user_id), states)

        self.presence_map.update({state.user_id: state for state in local_states})
        self.presence_changed[pos] = [state.user_id for state in local_states]

        self.notifier.on_new_replication_data()

    def send_failure(self, failure, destination):
        """As per TransactionQueue"""
        pos = self._next_pos()

        self.failures[pos] = (destination, str(failure))
        self.notifier.on_new_replication_data()

    def send_device_messages(self, destination):
        """As per TransactionQueue"""
        pos = self._next_pos()
        self.device_messages[pos] = destination
        self.notifier.on_new_replication_data()
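
    # Illustrative usage, not part of the original class; the room id and
    # destination below are made up. Because typing notifications are sent
    # with a key, a second call for the same room simply overwrites the first
    # entry in self.keyed_edu rather than queueing a duplicate:
    #
    #   queue.send_edu(
    #       "remote.example.com", "m.typing",
    #       {"room_id": "!room:example.com", "user_id": "@alice:example.com",
    #        "typing": True},
    #       key=("!room:example.com",),
    #   )
    #   queue.send_edu(
    #       "remote.example.com", "m.typing",
    #       {"room_id": "!room:example.com", "user_id": "@alice:example.com",
    #        "typing": False},
    #       key=("!room:example.com",),
    #   )   # clobbers the first EDU; only "typing": False remains queued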

    def get_current_token(self):
        return self.pos - 1

    def federation_ack(self, token):
        self._clear_queue_before_pos(token)

    def get_replication_rows(self, from_token, to_token, limit, federation_ack=None):
        """Get rows to be sent over federation between the two tokens

        Args:
            from_token (int)
            to_token (int)
            limit (int)
            federation_ack (int): Optional. The position the worker has
                explicitly acknowledged it has handled. Allows us to drop
                data from before that point.
        """
        # TODO: Handle limit.

        # To handle restarts where we wrap around
        if from_token > self.pos:
            from_token = -1

        # list of tuple(int, BaseFederationRow), where the first is the position
        # of the federation stream.
        rows = []

        # There should be only one reader, so let's delete everything it's
        # acknowledged it's seen.
        if federation_ack:
            self._clear_queue_before_pos(federation_ack)

        # Fetch changed presence
        keys = self.presence_changed.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        dest_user_ids = [
            (pos, user_id)
            for pos in keys[i:j]
            for user_id in self.presence_changed[pos]
        ]

        for (key, user_id) in dest_user_ids:
            rows.append((key, PresenceRow(
                state=self.presence_map[user_id],
            )))

        # Fetch changed keyed edus
        keys = self.keyed_edu_changed.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        # We purposefully clobber based on the key here: python dict comprehensions
        # always use the last value, so this will correctly point to the last
        # stream position.
        keyed_edus = {self.keyed_edu_changed[k]: k for k in keys[i:j]}

        for ((destination, edu_key), pos) in keyed_edus.iteritems():
            rows.append((pos, KeyedEduRow(
                key=edu_key,
                edu=self.keyed_edu[(destination, edu_key)],
            )))

        # Fetch changed edus
        keys = self.edus.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        edus = ((k, self.edus[k]) for k in keys[i:j])

        for (pos, edu) in edus:
            rows.append((pos, EduRow(edu)))

        # Fetch changed failures
        keys = self.failures.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        failures = ((k, self.failures[k]) for k in keys[i:j])

        for (pos, (destination, failure)) in failures:
            rows.append((pos, FailureRow(
                destination=destination,
                failure=failure,
            )))

        # Fetch changed device messages
        keys = self.device_messages.keys()
        i = keys.bisect_right(from_token)
        j = keys.bisect_right(to_token) + 1
        device_messages = {self.device_messages[k]: k for k in keys[i:j]}

        for (destination, pos) in device_messages.iteritems():
            rows.append((pos, DeviceRow(
                destination=destination,
            )))

        # Sort rows based on pos
        rows.sort()

        return [(pos, row.TypeId, row.to_data()) for pos, row in rows]
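
# Illustrative aside, not part of the original module: the worker drains the
# queue by asking for everything between its last-seen token and the current
# one, then acks what it has handled so the master can drop it from memory.
# The token values here are made up.
#
#   rows = queue.get_replication_rows(
#       from_token=10, to_token=queue.get_current_token(), limit=100,
#   )
#   # ... worker processes the rows ...
#   queue.federation_ack(queue.get_current_token())  # lets _clear_queue_before_pos run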

class BaseFederationRow(object):
    """Base class for rows to be sent in the federation stream.

    Specifies how to identify, serialize and deserialize the different types.
    """

    TypeId = None  # Unique string that ids the type. Must be overridden in sub classes.

    @staticmethod
    def from_data(data):
        """Parse the data from the federation stream into a row.

        Args:
            data: The value of ``data`` from FederationStreamRow.data, type
                depends on the type of stream
        """
        raise NotImplementedError()

    def to_data(self):
        """Serialize this row to be sent over the federation stream.

        Returns:
            The value to be sent in FederationStreamRow.data. The type depends
            on the type of stream.
        """
        raise NotImplementedError()

    def add_to_buffer(self, buff):
        """Add this row to the appropriate field in the buffer ready for this
        to be sent over federation.

        We use a buffer so that we can batch up events that have come in at
        the same time and send them all at once.

        Args:
            buff (ParsedFederationStreamData)
        """
        raise NotImplementedError()

class PresenceRow(BaseFederationRow, namedtuple("PresenceRow", (
    "state",  # UserPresenceState
))):
    TypeId = "p"

    @staticmethod
    def from_data(data):
        return PresenceRow(
            state=UserPresenceState.from_dict(data)
        )

    def to_data(self):
        return self.state.as_dict()

    def add_to_buffer(self, buff):
        buff.presence.append(self.state)


class KeyedEduRow(BaseFederationRow, namedtuple("KeyedEduRow", (
    "key",  # tuple(str) - the edu key passed to send_edu
    "edu",  # Edu
))):
    """Streams EDUs that have an associated key that is used to clobber. For example,
    typing EDUs clobber based on room_id.
    """

    TypeId = "k"

    @staticmethod
    def from_data(data):
        return KeyedEduRow(
            key=tuple(data["key"]),
            edu=Edu(**data["edu"]),
        )

    def to_data(self):
        return {
            "key": self.key,
            "edu": self.edu.get_internal_dict(),
        }

    def add_to_buffer(self, buff):
        buff.keyed_edus.setdefault(
            self.edu.destination, {}
        )[self.key] = self.edu


class EduRow(BaseFederationRow, namedtuple("EduRow", (
    "edu",  # Edu
))):
    """Streams EDUs that don't have keys. See KeyedEduRow."""

    TypeId = "e"

    @staticmethod
    def from_data(data):
        return EduRow(Edu(**data))

    def to_data(self):
        return self.edu.get_internal_dict()

    def add_to_buffer(self, buff):
        buff.edus.setdefault(self.edu.destination, []).append(self.edu)


class FailureRow(BaseFederationRow, namedtuple("FailureRow", (
    "destination",  # str
    "failure",
))):
    """Streams failures to a remote server. Failures are issued when there was
    something wrong with a transaction the remote sent us, e.g. it included
    an event that was invalid.
    """

    TypeId = "f"

    @staticmethod
    def from_data(data):
        return FailureRow(
            destination=data["destination"],
            failure=data["failure"],
        )

    def to_data(self):
        return {
            "destination": self.destination,
            "failure": self.failure,
        }

    def add_to_buffer(self, buff):
        buff.failures.setdefault(self.destination, []).append(self.failure)


class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", (
    "destination",  # str
))):
    """Streams the fact that either a) there are pending to-device messages for
    users on the remote, or b) a local user's device has changed and needs to
    be sent to the remote.
    """

    TypeId = "d"

    @staticmethod
    def from_data(data):
        return DeviceRow(destination=data["destination"])

    def to_data(self):
        return {"destination": self.destination}

    def add_to_buffer(self, buff):
        buff.device_destinations.add(self.destination)


TypeToRow = {
    Row.TypeId: Row
    for Row in (
        PresenceRow,
        KeyedEduRow,
        EduRow,
        FailureRow,
        DeviceRow,
    )
}
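
# Illustrative aside, not part of the original module: a row round-trips
# through its type id, so the worker can reconstruct it from the wire format.
# The destination and key below are made up.
#
#   row = KeyedEduRow(key=("!room:example.com",), edu=some_edu)
#   wire = (row.TypeId, row.to_data())              # ("k", {"key": [...], "edu": {...}})
#   parsed = TypeToRow[wire[0]].from_data(wire[1])  # back to a KeyedEduRow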

ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
    "presence",  # list(UserPresenceState)
    "keyed_edus",  # dict of destination -> { key -> Edu }
    "edus",  # dict of destination -> [Edu]
    "failures",  # dict of destination -> [failures]
    "device_destinations",  # set of destinations
))

def process_rows_for_federation(transaction_queue, rows):
    """Parse a list of rows from the federation stream and put them in the
    transaction queue ready for sending to the relevant homeservers.

    Args:
        transaction_queue (TransactionQueue)
        rows (list(synapse.replication.tcp.streams.FederationStreamRow))
    """

    # The federation stream contains a bunch of different types of
    # rows that need to be handled differently. We parse the rows, put
    # them into the appropriate collection and then send them off.

    buff = ParsedFederationStreamData(
        presence=[],
        keyed_edus={},
        edus={},
        failures={},
        device_destinations=set(),
    )

    # Parse the rows in the stream and add to the buffer
    for row in rows:
        if row.type not in TypeToRow:
            logger.error("Unrecognized federation row type %r", row.type)
            continue

        RowType = TypeToRow[row.type]
        parsed_row = RowType.from_data(row.data)
        parsed_row.add_to_buffer(buff)

    if buff.presence:
        transaction_queue.send_presence(buff.presence)

    for destination, edu_map in buff.keyed_edus.iteritems():
        for key, edu in edu_map.items():
            transaction_queue.send_edu(
                edu.destination, edu.edu_type, edu.content, key=key,
            )

    for destination, edu_list in buff.edus.iteritems():
        for edu in edu_list:
            transaction_queue.send_edu(
                edu.destination, edu.edu_type, edu.content, key=None,
            )

    for destination, failure_list in buff.failures.iteritems():
        for failure in failure_list:
            # Note: TransactionQueue.send_failure takes (failure, destination).
            transaction_queue.send_failure(failure, destination)

    for destination in buff.device_destinations:
        transaction_queue.send_device_messages(destination)
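
# Illustrative usage sketch, not part of the original module: on the worker,
# rows arriving on the federation replication stream carry a `type` and `data`
# attribute matching the TypeId/to_data() pairs above, and are fed straight
# into process_rows_for_federation. The row below is a made-up example and the
# namedtuple is only a stand-in for the real FederationStreamRow.
#
#   FederationStreamRow = namedtuple("FederationStreamRow", ("type", "data"))
#   rows = [FederationStreamRow(type="d", data={"destination": "remote.example.com"})]
#   process_rows_for_federation(worker_transaction_queue, rows)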
@@ -12,20 +12,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import datetime
 
 from twisted.internet import defer
 
 from .persistence import TransactionActions
-from .units import Transaction
+from .units import Transaction, Edu
 
 from synapse.api.errors import HttpResponseException
+from synapse.util import logcontext
 from synapse.util.async import run_on_reactor
-from synapse.util.logutils import log_function
-from synapse.util.logcontext import PreserveLoggingContext
-from synapse.util.retryutils import (
-    get_retry_limiter, NotRetryingDestination,
-)
+from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
+from synapse.util.metrics import measure_func
+from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
 import synapse.metrics
 
 import logging
@@ -35,6 +34,14 @@ logger = logging.getLogger(__name__)
 
 metrics = synapse.metrics.get_metrics_for(__name__)
 
+client_metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
+sent_pdus_destination_dist = client_metrics.register_distribution(
+    "sent_pdu_destinations"
+)
+sent_edus_counter = client_metrics.register_counter("sent_edus")
+
+sent_transactions_counter = client_metrics.register_counter("sent_transactions")
+
 
 class TransactionQueue(object):
     """This class makes sure we only have one transaction in flight at
@@ -43,15 +50,17 @@ class TransactionQueue(object):
     It batches pending PDUs into single transactions.
     """
 
-    def __init__(self, hs, transport_layer):
+    def __init__(self, hs):
         self.server_name = hs.hostname
 
         self.store = hs.get_datastore()
+        self.state = hs.get_state_handler()
         self.transaction_actions = TransactionActions(self.store)
 
-        self.transport_layer = transport_layer
+        self.transport_layer = hs.get_federation_transport_client()
 
-        self._clock = hs.get_clock()
+        self.clock = hs.get_clock()
+        self.is_mine_id = hs.is_mine_id
 
         # Is a mapping from destinations -> deferreds. Used to keep track
         # of which destinations have transactions in flight and when they are
@@ -69,20 +78,53 @@ class TransactionQueue(object):
         # destination -> list of tuple(edu, deferred)
         self.pending_edus_by_dest = edus = {}
 
+        # Map of user_id -> UserPresenceState for all the pending presence
+        # to be sent out by user_id. Entries here get processed and put in
+        # pending_presence_by_dest
+        self.pending_presence = {}
+
+        # Map of destination -> user_id -> UserPresenceState of pending presence
+        # to be sent to each destinations
+        self.pending_presence_by_dest = presence = {}
+
+        # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
+        # based on their key (e.g. typing events by room_id)
+        # Map of destination -> (edu_type, key) -> Edu
+        self.pending_edus_keyed_by_dest = edus_keyed = {}
+
         metrics.register_callback(
             "pending_pdus",
             lambda: sum(map(len, pdus.values())),
         )
         metrics.register_callback(
             "pending_edus",
-            lambda: sum(map(len, edus.values())),
+            lambda: (
+                sum(map(len, edus.values()))
+                + sum(map(len, presence.values()))
+                + sum(map(len, edus_keyed.values()))
+            ),
         )
 
         # destination -> list of tuple(failure, deferred)
         self.pending_failures_by_dest = {}
 
+        # destination -> stream_id of last successfully sent to-device message.
+        # NB: may be a long or an int.
+        self.last_device_stream_id_by_dest = {}
+
+        # destination -> stream_id of last successfully sent device list
+        # update.
+        self.last_device_list_stream_id_by_dest = {}
+
         # HACK to get unique tx id
-        self._next_txn_id = int(self._clock.time_msec())
+        self._next_txn_id = int(self.clock.time_msec())
+
+        self._order = 1
+
+        self._is_processing = False
+        self._last_poked_id = -1
+
+        self._processing_pending_presence = False
 
     def can_send_to(self, destination):
         """Can we send messages to the given server?
@@ -104,11 +146,74 @@ class TransactionQueue(object):
         else:
             return not destination.startswith("localhost")
 
-    def enqueue_pdu(self, pdu, destinations, order):
+    @defer.inlineCallbacks
+    def notify_new_events(self, current_id):
+        """This gets called when we have some new events we might want to
+        send out to other servers.
+        """
+        self._last_poked_id = max(current_id, self._last_poked_id)
+
+        if self._is_processing:
+            return
+
+        try:
+            self._is_processing = True
+            while True:
+                last_token = yield self.store.get_federation_out_pos("events")
+                next_token, events = yield self.store.get_all_new_events_stream(
+                    last_token, self._last_poked_id, limit=20,
+                )
+
+                logger.debug("Handling %s -> %s", last_token, next_token)
+
+                if not events and next_token >= self._last_poked_id:
+                    break
+
+                for event in events:
+                    # Only send events for this server.
+                    send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
+                    is_mine = self.is_mine_id(event.event_id)
+                    if not is_mine and send_on_behalf_of is None:
+                        continue
+
+                    # Get the state from before the event.
+                    # We need to make sure that this is the state from before
+                    # the event and not from after it.
+                    # Otherwise if the last member on a server in a room is
+                    # banned then it won't receive the event because it won't
+                    # be in the room after the ban.
+                    destinations = yield self.state.get_current_hosts_in_room(
+                        event.room_id, latest_event_ids=[
+                            prev_id for prev_id, _ in event.prev_events
+                        ],
+                    )
+                    destinations = set(destinations)
+
+                    if send_on_behalf_of is not None:
+                        # If we are sending the event on behalf of another server
+                        # then it already has the event and there is no reason to
+                        # send the event to it.
+                        destinations.discard(send_on_behalf_of)
+
+                    logger.debug("Sending %s to %r", event, destinations)
+
+                    self._send_pdu(event, destinations)
+
+                yield self.store.update_federation_out_pos(
+                    "events", next_token
+                )
+
+        finally:
+            self._is_processing = False
+
+    def _send_pdu(self, pdu, destinations):
         # We loop through all destinations to see whether we already have
         # a transaction in progress. If we do, stick it in the pending_pdus
         # table and we'll get back to it later.
 
+        order = self._order
+        self._order += 1
+
         destinations = set(destinations)
         destinations = set(
             dest for dest in destinations if self.can_send_to(dest)
@@ -119,89 +224,141 @@ class TransactionQueue(object):
|
|||||||
if not destinations:
|
if not destinations:
|
||||||
return
|
return
|
||||||
|
|
||||||
deferreds = []
|
sent_pdus_destination_dist.inc_by(len(destinations))
|
||||||
|
|
||||||
for destination in destinations:
|
for destination in destinations:
|
||||||
deferred = defer.Deferred()
|
|
||||||
self.pending_pdus_by_dest.setdefault(destination, []).append(
|
self.pending_pdus_by_dest.setdefault(destination, []).append(
|
||||||
(pdu, deferred, order)
|
(pdu, order)
|
||||||
)
|
)
|
||||||
|
|
||||||
def chain(failure):
|
self._attempt_new_transaction(destination)
|
||||||
if not deferred.called:
|
|
||||||
deferred.errback(failure)
|
|
||||||
|
|
||||||
def log_failure(f):
|
@logcontext.preserve_fn # the caller should not yield on this
|
||||||
logger.warn("Failed to send pdu to %s: %s", destination, f.value)
|
@defer.inlineCallbacks
|
||||||
|
def send_presence(self, states):
|
||||||
|
"""Send the new presence states to the appropriate destinations.
|
||||||
|
|
||||||
deferred.addErrback(log_failure)
|
This actually queues up the presence states ready for sending and
|
||||||
|
triggers a background task to process them and send out the transactions.
|
||||||
|
|
||||||
with PreserveLoggingContext():
|
Args:
|
||||||
self._attempt_new_transaction(destination).addErrback(chain)
|
states (list(UserPresenceState))
|
||||||
|
"""
|
||||||
|
|
||||||
deferreds.append(deferred)
|
# First we queue up the new presence by user ID, so multiple presence
|
||||||
|
# updates in quick successtion are correctly handled
|
||||||
|
# We only want to send presence for our own users, so lets always just
|
||||||
|
# filter here just in case.
|
||||||
|
self.pending_presence.update({
|
||||||
|
state.user_id: state for state in states
|
||||||
|
if self.is_mine_id(state.user_id)
|
||||||
|
})
|
||||||
|
|
||||||
# NO inlineCallbacks
|
# We then handle the new pending presence in batches, first figuring
|
||||||
def enqueue_edu(self, edu):
|
# out the destinations we need to send each state to and then poking it
|
||||||
destination = edu.destination
|
# to attempt a new transaction. We linearize this so that we don't
|
||||||
|
# accidentally mess up the ordering and send multiple presence updates
|
||||||
|
# in the wrong order
|
||||||
|
if self._processing_pending_presence:
|
||||||
|
return
|
||||||
|
|
||||||
|
self._processing_pending_presence = True
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
states_map = self.pending_presence
|
||||||
|
self.pending_presence = {}
|
||||||
|
|
||||||
|
if not states_map:
|
||||||
|
break
|
||||||
|
|
||||||
|
yield self._process_presence_inner(states_map.values())
|
||||||
|
finally:
|
||||||
|
self._processing_pending_presence = False
|
||||||
|
|
||||||
|
@measure_func("txnqueue._process_presence")
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _process_presence_inner(self, states):
|
||||||
|
"""Given a list of states populate self.pending_presence_by_dest and
|
||||||
|
poke to send a new transaction to each destination
|
||||||
|
|
||||||
|
Args:
|
||||||
|
states (list(UserPresenceState))
|
||||||
|
"""
|
||||||
|
hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
|
||||||
|
|
||||||
|
for destinations, states in hosts_and_states:
|
||||||
|
for destination in destinations:
|
||||||
|
if not self.can_send_to(destination):
|
||||||
|
continue
|
||||||
|
|
||||||
|
self.pending_presence_by_dest.setdefault(
|
||||||
|
destination, {}
|
||||||
|
).update({
|
||||||
|
state.user_id: state for state in states
|
||||||
|
})
|
||||||
|
|
||||||
|
self._attempt_new_transaction(destination)
|
||||||
|
|
||||||
|
def send_edu(self, destination, edu_type, content, key=None):
|
||||||
|
edu = Edu(
|
||||||
|
origin=self.server_name,
|
||||||
|
destination=destination,
|
||||||
|
edu_type=edu_type,
|
||||||
|
content=content,
|
||||||
|
)
|
||||||
|
|
||||||
if not self.can_send_to(destination):
|
if not self.can_send_to(destination):
|
||||||
return
|
return
|
||||||
|
|
||||||
deferred = defer.Deferred()
|
sent_edus_counter.inc()
|
||||||
self.pending_edus_by_dest.setdefault(destination, []).append(
|
|
||||||
(edu, deferred)
|
|
||||||
)
|
|
||||||
|
|
||||||
def chain(failure):
|
if key:
|
||||||
if not deferred.called:
|
self.pending_edus_keyed_by_dest.setdefault(
|
||||||
deferred.errback(failure)
|
destination, {}
|
||||||
|
)[(edu.edu_type, key)] = edu
|
||||||
|
else:
|
||||||
|
self.pending_edus_by_dest.setdefault(destination, []).append(edu)
|
||||||
|
|
||||||
def log_failure(f):
|
self._attempt_new_transaction(destination)
|
||||||
logger.warn("Failed to send edu to %s: %s", destination, f.value)
|
|
||||||
|
|
||||||
deferred.addErrback(log_failure)
|
def send_failure(self, failure, destination):
|
||||||
|
|
||||||
with PreserveLoggingContext():
|
|
||||||
self._attempt_new_transaction(destination).addErrback(chain)
|
|
||||||
|
|
||||||
return deferred
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def enqueue_failure(self, failure, destination):
|
|
||||||
if destination == self.server_name or destination == "localhost":
|
if destination == self.server_name or destination == "localhost":
|
||||||
return
|
return
|
||||||
|
|
||||||
deferred = defer.Deferred()
|
|
||||||
|
|
||||||
if not self.can_send_to(destination):
|
if not self.can_send_to(destination):
|
||||||
return
|
return
|
||||||
|
|
||||||
self.pending_failures_by_dest.setdefault(
|
self.pending_failures_by_dest.setdefault(
|
||||||
destination, []
|
destination, []
|
||||||
).append(
|
).append(failure)
|
||||||
(failure, deferred)
|
|
||||||
)
|
|
||||||
|
|
||||||
def chain(f):
|
self._attempt_new_transaction(destination)
|
||||||
if not deferred.called:
|
|
||||||
deferred.errback(f)
|
|
||||||
|
|
||||||
def log_failure(f):
|
def send_device_messages(self, destination):
|
||||||
logger.warn("Failed to send failure to %s: %s", destination, f.value)
|
if destination == self.server_name or destination == "localhost":
|
||||||
|
return
|
||||||
|
|
||||||
deferred.addErrback(log_failure)
|
if not self.can_send_to(destination):
|
||||||
|
return
|
||||||
|
|
||||||
with PreserveLoggingContext():
|
self._attempt_new_transaction(destination)
|
||||||
self._attempt_new_transaction(destination).addErrback(chain)
|
|
||||||
|
|
||||||
yield deferred
|
def get_current_token(self):
|
||||||
|
return 0
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
@log_function
|
|
||||||
def _attempt_new_transaction(self, destination):
|
def _attempt_new_transaction(self, destination):
|
||||||
yield run_on_reactor()
|
"""Try to start a new transaction to this destination
|
||||||
|
|
||||||
|
If there is already a transaction in progress to this destination,
|
||||||
|
returns immediately. Otherwise kicks off the process of sending a
|
||||||
|
transaction in the background.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destination (str):
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
None
|
||||||
|
"""
|
||||||
# list of (pending_pdu, deferred, order)
|
# list of (pending_pdu, deferred, order)
|
||||||
if destination in self.pending_transactions:
|
if destination in self.pending_transactions:
|
||||||
# XXX: pending_transactions can get stuck on by a never-ending
|
# XXX: pending_transactions can get stuck on by a never-ending
|
||||||
@@ -214,55 +371,190 @@ class TransactionQueue(object):
|
|||||||
)
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
logger.debug("TX [%s] Starting transaction loop", destination)
|
||||||
|
|
||||||
|
# Drop the logcontext before starting the transaction. It doesn't
|
||||||
|
# really make sense to log all the outbound transactions against
|
||||||
|
# whatever path led us to this point: that's pretty arbitrary really.
|
||||||
|
#
|
||||||
|
# (this also means we can fire off _perform_transaction without
|
||||||
|
# yielding)
|
||||||
|
with logcontext.PreserveLoggingContext():
|
||||||
|
self._transaction_transmission_loop(destination)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _transaction_transmission_loop(self, destination):
|
||||||
|
pending_pdus = []
|
||||||
|
try:
|
||||||
|
self.pending_transactions[destination] = 1
|
||||||
|
|
||||||
|
# This will throw if we wouldn't retry. We do this here so we fail
|
||||||
|
# quickly, but we will later check this again in the http client,
|
||||||
|
# hence why we throw the result away.
|
||||||
|
yield get_retry_limiter(destination, self.clock, self.store)
|
||||||
|
|
||||||
|
# XXX: what's this for?
|
||||||
|
yield run_on_reactor()
|
||||||
|
|
||||||
|
pending_pdus = []
|
||||||
|
while True:
|
||||||
|
device_message_edus, device_stream_id, dev_list_id = (
|
||||||
|
yield self._get_new_device_messages(destination)
|
||||||
|
)
|
||||||
|
|
||||||
|
# BEGIN CRITICAL SECTION
|
||||||
|
#
|
||||||
|
# In order to avoid a race condition, we need to make sure that
|
||||||
|
# the following code (from popping the queues up to the point
|
||||||
|
# where we decide if we actually have any pending messages) is
|
||||||
|
# atomic - otherwise new PDUs or EDUs might arrive in the
|
||||||
|
# meantime, but not get sent because we hold the
|
||||||
|
# pending_transactions flag.
|
||||||
|
|
||||||
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
|
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
|
||||||
pending_edus = self.pending_edus_by_dest.pop(destination, [])
|
pending_edus = self.pending_edus_by_dest.pop(destination, [])
|
||||||
|
pending_presence = self.pending_presence_by_dest.pop(destination, {})
|
||||||
pending_failures = self.pending_failures_by_dest.pop(destination, [])
|
pending_failures = self.pending_failures_by_dest.pop(destination, [])
|
||||||
|
|
||||||
|
pending_edus.extend(
|
||||||
|
self.pending_edus_keyed_by_dest.pop(destination, {}).values()
|
||||||
|
)
|
||||||
|
|
||||||
|
pending_edus.extend(device_message_edus)
|
||||||
|
if pending_presence:
|
||||||
|
pending_edus.append(
|
||||||
|
Edu(
|
||||||
|
origin=self.server_name,
|
||||||
|
destination=destination,
|
||||||
|
edu_type="m.presence",
|
||||||
|
content={
|
||||||
|
"push": [
|
||||||
|
format_user_presence_state(
|
||||||
|
presence, self.clock.time_msec()
|
||||||
|
)
|
||||||
|
for presence in pending_presence.values()
|
||||||
|
]
|
||||||
|
},
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
if pending_pdus:
|
if pending_pdus:
|
||||||
logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
|
logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
|
||||||
destination, len(pending_pdus))
|
destination, len(pending_pdus))
|
||||||
|
|
||||||
if not pending_pdus and not pending_edus and not pending_failures:
|
if not pending_pdus and not pending_edus and not pending_failures:
|
||||||
logger.debug("TX [%s] Nothing to send", destination)
|
logger.debug("TX [%s] Nothing to send", destination)
|
||||||
|
self.last_device_stream_id_by_dest[destination] = (
|
||||||
|
device_stream_id
|
||||||
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
try:
|
# END CRITICAL SECTION
|
||||||
self.pending_transactions[destination] = 1
|
|
||||||
|
success = yield self._send_new_transaction(
|
||||||
|
destination, pending_pdus, pending_edus, pending_failures,
|
||||||
|
)
|
||||||
|
if success:
|
||||||
|
sent_transactions_counter.inc()
|
||||||
|
# Remove the acknowledged device messages from the database
|
||||||
|
# Only bother if we actually sent some device messages
|
||||||
|
if device_message_edus:
|
||||||
|
yield self.store.delete_device_msgs_for_remote(
|
||||||
|
destination, device_stream_id
|
||||||
|
)
|
||||||
|
logger.info("Marking as sent %r %r", destination, dev_list_id)
|
||||||
|
yield self.store.mark_as_sent_devices_by_remote(
|
||||||
|
destination, dev_list_id
|
||||||
|
)
|
||||||
|
|
||||||
|
self.last_device_stream_id_by_dest[destination] = device_stream_id
|
||||||
|
self.last_device_list_stream_id_by_dest[destination] = dev_list_id
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
except NotRetryingDestination as e:
|
||||||
|
logger.debug(
|
||||||
|
"TX [%s] not ready for retry yet (next retry at %s) - "
|
||||||
|
"dropping transaction for now",
|
||||||
|
destination,
|
||||||
|
datetime.datetime.fromtimestamp(
|
||||||
|
(e.retry_last_ts + e.retry_interval) / 1000.0
|
||||||
|
),
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warn(
|
||||||
|
"TX [%s] Failed to send transaction: %s",
|
||||||
|
destination,
|
||||||
|
e,
|
||||||
|
)
|
||||||
|
for p, _ in pending_pdus:
|
||||||
|
logger.info("Failed to send event %s to %s", p.event_id,
|
||||||
|
destination)
|
||||||
|
finally:
|
||||||
|
# We want to be *very* sure we delete this after we stop processing
|
||||||
|
self.pending_transactions.pop(destination, None)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _get_new_device_messages(self, destination):
|
||||||
|
last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
|
||||||
|
to_device_stream_id = self.store.get_to_device_stream_token()
|
||||||
|
contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
|
||||||
|
destination, last_device_stream_id, to_device_stream_id
|
||||||
|
)
|
||||||
|
edus = [
|
||||||
|
Edu(
|
||||||
|
origin=self.server_name,
|
||||||
|
destination=destination,
|
||||||
|
edu_type="m.direct_to_device",
|
||||||
|
content=content,
|
||||||
|
)
|
||||||
|
for content in contents
|
||||||
|
]
|
||||||
|
|
||||||
|
last_device_list = self.last_device_list_stream_id_by_dest.get(destination, 0)
|
||||||
|
now_stream_id, results = yield self.store.get_devices_by_remote(
|
||||||
|
destination, last_device_list
|
||||||
|
)
|
||||||
|
edus.extend(
|
||||||
|
Edu(
|
||||||
|
origin=self.server_name,
|
||||||
|
destination=destination,
|
||||||
|
edu_type="m.device_list_update",
|
||||||
|
content=content,
|
||||||
|
)
|
||||||
|
for content in results
|
||||||
|
)
|
||||||
|
defer.returnValue((edus, stream_id, now_stream_id))
|
||||||
|
|
||||||
|
@measure_func("_send_new_transaction")
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _send_new_transaction(self, destination, pending_pdus, pending_edus,
|
||||||
|
pending_failures):
|
||||||
|
|
||||||
|
# Sort based on the order field
|
||||||
|
pending_pdus.sort(key=lambda t: t[1])
|
||||||
|
pdus = [x[0] for x in pending_pdus]
|
||||||
|
edus = pending_edus
|
||||||
|
failures = [x.get_dict() for x in pending_failures]
|
||||||
|
|
||||||
|
success = True
|
||||||
|
|
||||||
logger.debug("TX [%s] _attempt_new_transaction", destination)
|
logger.debug("TX [%s] _attempt_new_transaction", destination)
|
||||||
|
|
||||||
# Sort based on the order field
|
|
||||||
pending_pdus.sort(key=lambda t: t[2])
|
|
||||||
|
|
||||||
pdus = [x[0] for x in pending_pdus]
|
|
||||||
edus = [x[0] for x in pending_edus]
|
|
||||||
failures = [x[0].get_dict() for x in pending_failures]
|
|
||||||
deferreds = [
|
|
||||||
x[1]
|
|
||||||
for x in pending_pdus + pending_edus + pending_failures
|
|
||||||
]
|
|
||||||
|
|
||||||
txn_id = str(self._next_txn_id)
|
txn_id = str(self._next_txn_id)
|
||||||
|
|
||||||
limiter = yield get_retry_limiter(
|
|
||||||
destination,
|
|
||||||
self._clock,
|
|
||||||
self.store,
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"TX [%s] {%s} Attempting new transaction"
|
"TX [%s] {%s} Attempting new transaction"
|
||||||
" (pdus: %d, edus: %d, failures: %d)",
|
" (pdus: %d, edus: %d, failures: %d)",
|
||||||
destination, txn_id,
|
destination, txn_id,
|
||||||
len(pending_pdus),
|
len(pdus),
|
||||||
len(pending_edus),
|
len(edus),
|
||||||
len(pending_failures)
|
len(failures)
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.debug("TX [%s] Persisting transaction...", destination)
|
logger.debug("TX [%s] Persisting transaction...", destination)
|
||||||
|
|
||||||
transaction = Transaction.create_new(
|
transaction = Transaction.create_new(
|
||||||
origin_server_ts=int(self._clock.time_msec()),
|
origin_server_ts=int(self.clock.time_msec()),
|
||||||
transaction_id=txn_id,
|
transaction_id=txn_id,
|
||||||
origin=self.server_name,
|
origin=self.server_name,
|
||||||
destination=destination,
|
destination=destination,
|
||||||
@@ -281,19 +573,18 @@ class TransactionQueue(object):
|
|||||||
" (PDUs: %d, EDUs: %d, failures: %d)",
|
" (PDUs: %d, EDUs: %d, failures: %d)",
|
||||||
destination, txn_id,
|
destination, txn_id,
|
||||||
transaction.transaction_id,
|
transaction.transaction_id,
|
||||||
len(pending_pdus),
|
len(pdus),
|
||||||
len(pending_edus),
|
len(edus),
|
||||||
len(pending_failures),
|
len(failures),
|
||||||
)
|
)
|
||||||
|
|
||||||
with limiter:
|
|
||||||
# Actually send the transaction
|
# Actually send the transaction
|
||||||
|
|
||||||
# FIXME (erikj): This is a bit of a hack to make the Pdu age
|
# FIXME (erikj): This is a bit of a hack to make the Pdu age
|
||||||
# keys work
|
# keys work
|
||||||
def json_data_cb():
|
def json_data_cb():
|
||||||
data = transaction.get_dict()
|
data = transaction.get_dict()
|
||||||
now = int(self._clock.time_msec())
|
now = int(self.clock.time_msec())
|
||||||
if "pdus" in data:
|
if "pdus" in data:
|
||||||
for p in data["pdus"]:
|
for p in data["pdus"]:
|
||||||
if "age_ts" in p:
|
if "age_ts" in p:
|
||||||
@@ -319,6 +610,13 @@ class TransactionQueue(object):
|
|||||||
code = e.code
|
code = e.code
|
||||||
response = e.response
|
response = e.response
|
||||||
|
|
||||||
|
if e.code in (401, 404, 429) or 500 <= e.code:
|
||||||
|
logger.info(
|
||||||
|
"TX [%s] {%s} got %d response",
|
||||||
|
destination, txn_id, code
|
||||||
|
)
|
||||||
|
raise e
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
"TX [%s] {%s} got %d response",
|
"TX [%s] {%s} got %d response",
|
||||||
destination, txn_id, code
|
destination, txn_id, code
|
||||||
@@ -333,52 +631,11 @@ class TransactionQueue(object):
|
|||||||
|
|
||||||
logger.debug("TX [%s] Marked as delivered", destination)
|
logger.debug("TX [%s] Marked as delivered", destination)
|
||||||
|
|
||||||
logger.debug("TX [%s] Yielding to callbacks...", destination)
|
if code != 200:
|
||||||
|
for p in pdus:
|
||||||
for deferred in deferreds:
|
|
||||||
if code == 200:
|
|
||||||
deferred.callback(None)
|
|
||||||
else:
|
|
||||||
deferred.errback(RuntimeError("Got status %d" % code))
|
|
||||||
|
|
||||||
# Ensures we don't continue until all callbacks on that
|
|
||||||
# deferred have fired
|
|
||||||
try:
|
|
||||||
yield deferred
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
logger.debug("TX [%s] Yielded to callbacks", destination)
|
|
||||||
except NotRetryingDestination:
|
|
||||||
logger.info(
|
logger.info(
|
||||||
"TX [%s] not ready for retry yet - "
|
"Failed to send event %s to %s", p.event_id, destination
|
||||||
"dropping transaction for now",
|
|
||||||
destination,
|
|
||||||
)
|
|
||||||
except RuntimeError as e:
|
|
||||||
# We capture this here as there as nothing actually listens
|
|
||||||
# for this finishing functions deferred.
|
|
||||||
logger.warn(
|
|
||||||
"TX [%s] Problem in _attempt_transaction: %s",
|
|
||||||
destination,
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
# We capture this here as there as nothing actually listens
|
|
||||||
# for this finishing functions deferred.
|
|
||||||
logger.warn(
|
|
||||||
"TX [%s] Problem in _attempt_transaction: %s",
|
|
||||||
destination,
|
|
||||||
e,
|
|
||||||
)
|
)
|
||||||
|
success = False
|
||||||
|
|
||||||
for deferred in deferreds:
|
defer.returnValue(success)
|
||||||
if not deferred.called:
|
|
||||||
deferred.errback(e)
|
|
||||||
|
|
||||||
finally:
|
|
||||||
# We want to be *very* sure we delete this after we stop processing
|
|
||||||
self.pending_transactions.pop(destination, None)
|
|
||||||
|
|
||||||
# Check to see if there is anything else to send.
|
|
||||||
self._attempt_new_transaction(destination)
|
|
||||||
|
|||||||
@@ -54,6 +54,28 @@ class TransportLayerClient(object):
|
|||||||
destination, path=path, args={"event_id": event_id},
|
destination, path=path, args={"event_id": event_id},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@log_function
|
||||||
|
def get_room_state_ids(self, destination, room_id, event_id):
|
||||||
|
""" Requests all state for a given room from the given server at the
|
||||||
|
given event. Returns the state's event_id's
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destination (str): The host name of the remote home server we want
|
||||||
|
to get the state from.
|
||||||
|
context (str): The name of the context we want the state of
|
||||||
|
event_id (str): The event we want the context at.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred: Results in a dict received from the remote homeserver.
|
||||||
|
"""
|
||||||
|
logger.debug("get_room_state_ids dest=%s, room=%s",
|
||||||
|
destination, room_id)
|
||||||
|
|
||||||
|
path = PREFIX + "/state_ids/%s/" % room_id
|
||||||
|
return self.client.get_json(
|
||||||
|
destination, path=path, args={"event_id": event_id},
|
||||||
|
)
|
||||||
|
|
||||||
@log_function
|
@log_function
|
||||||
def get_event(self, destination, event_id, timeout=None):
|
def get_event(self, destination, event_id, timeout=None):
|
||||||
""" Requests the pdu with give id and origin from the given server.
|
""" Requests the pdu with give id and origin from the given server.
|
||||||
@@ -141,6 +163,7 @@ class TransportLayerClient(object):
|
|||||||
data=json_data,
|
data=json_data,
|
||||||
json_data_callback=json_data_callback,
|
json_data_callback=json_data_callback,
|
||||||
long_retries=True,
|
long_retries=True,
|
||||||
|
backoff_on_404=True, # If we get a 404 the other side has gone
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.debug(
|
logger.debug(
|
||||||
@@ -152,7 +175,8 @@ class TransportLayerClient(object):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def make_query(self, destination, query_type, args, retry_on_dns_fail):
|
def make_query(self, destination, query_type, args, retry_on_dns_fail,
|
||||||
|
ignore_backoff=False):
|
||||||
path = PREFIX + "/query/%s" % query_type
|
path = PREFIX + "/query/%s" % query_type
|
||||||
|
|
||||||
content = yield self.client.get_json(
|
content = yield self.client.get_json(
|
||||||
@@ -161,6 +185,7 @@ class TransportLayerClient(object):
|
|||||||
args=args,
|
args=args,
|
||||||
retry_on_dns_fail=retry_on_dns_fail,
|
retry_on_dns_fail=retry_on_dns_fail,
|
||||||
timeout=10000,
|
timeout=10000,
|
||||||
|
ignore_backoff=ignore_backoff,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(content)
|
defer.returnValue(content)
|
||||||
@@ -168,6 +193,26 @@ class TransportLayerClient(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def make_membership_event(self, destination, room_id, user_id, membership):
|
def make_membership_event(self, destination, room_id, user_id, membership):
|
||||||
|
"""Asks a remote server to build and sign us a membership event
|
||||||
|
|
||||||
|
Note that this does not append any events to any graphs.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destination (str): address of remote homeserver
|
||||||
|
room_id (str): room to join/leave
|
||||||
|
user_id (str): user to be joined/left
|
||||||
|
membership (str): one of join/leave
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred: Succeeds when we get a 2xx HTTP response. The result
|
||||||
|
will be the decoded JSON body (ie, the new event).
|
||||||
|
|
||||||
|
Fails with ``HTTPRequestException`` if we get an HTTP response
|
||||||
|
code >= 300.
|
||||||
|
|
||||||
|
Fails with ``NotRetryingDestination`` if we are not yet ready
|
||||||
|
to retry this server.
|
||||||
|
"""
|
||||||
valid_memberships = {Membership.JOIN, Membership.LEAVE}
|
valid_memberships = {Membership.JOIN, Membership.LEAVE}
|
||||||
if membership not in valid_memberships:
|
if membership not in valid_memberships:
|
||||||
raise RuntimeError(
|
raise RuntimeError(
|
||||||
@@ -176,11 +221,23 @@ class TransportLayerClient(object):
|
|||||||
)
|
)
|
||||||
path = PREFIX + "/make_%s/%s/%s" % (membership, room_id, user_id)
|
path = PREFIX + "/make_%s/%s/%s" % (membership, room_id, user_id)
|
||||||
|
|
||||||
|
ignore_backoff = False
|
||||||
|
retry_on_dns_fail = False
|
||||||
|
|
||||||
|
if membership == Membership.LEAVE:
|
||||||
|
# we particularly want to do our best to send leave events. The
|
||||||
|
# problem is that if it fails, we won't retry it later, so if the
|
||||||
|
# remote server was just having a momentary blip, the room will be
|
||||||
|
# out of sync.
|
||||||
|
ignore_backoff = True
|
||||||
|
retry_on_dns_fail = True
|
||||||
|
|
||||||
content = yield self.client.get_json(
|
content = yield self.client.get_json(
|
||||||
destination=destination,
|
destination=destination,
|
||||||
path=path,
|
path=path,
|
||||||
retry_on_dns_fail=False,
|
retry_on_dns_fail=retry_on_dns_fail,
|
||||||
timeout=20000,
|
timeout=20000,
|
||||||
|
ignore_backoff=ignore_backoff,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(content)
|
defer.returnValue(content)
|
||||||
@@ -207,6 +264,12 @@ class TransportLayerClient(object):
|
|||||||
destination=destination,
|
destination=destination,
|
||||||
path=path,
|
path=path,
|
||||||
data=content,
|
data=content,
|
||||||
|
|
||||||
|
# we want to do our best to send this through. The problem is
|
||||||
|
# that if it fails, we won't retry it later, so if the remote
|
||||||
|
# server was just having a momentary blip, the room will be out of
|
||||||
|
# sync.
|
||||||
|
ignore_backoff=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
defer.returnValue(response)
|
||||||
@@ -220,18 +283,35 @@ class TransportLayerClient(object):
|
|||||||
destination=destination,
|
destination=destination,
|
||||||
path=path,
|
path=path,
|
||||||
data=content,
|
data=content,
|
||||||
|
ignore_backoff=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
defer.returnValue(response)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def get_public_rooms(self, remote_server):
|
def get_public_rooms(self, remote_server, limit, since_token,
|
||||||
|
search_filter=None, include_all_networks=False,
|
||||||
|
third_party_instance_id=None):
|
||||||
path = PREFIX + "/publicRooms"
|
path = PREFIX + "/publicRooms"
|
||||||
|
|
||||||
|
args = {
|
||||||
|
"include_all_networks": "true" if include_all_networks else "false",
|
||||||
|
}
|
||||||
|
if third_party_instance_id:
|
||||||
|
args["third_party_instance_id"] = third_party_instance_id,
|
||||||
|
if limit:
|
||||||
|
args["limit"] = [str(limit)]
|
||||||
|
if since_token:
|
||||||
|
args["since"] = [since_token]
|
||||||
|
|
||||||
|
# TODO(erikj): Actually send the search_filter across federation.
|
||||||
|
|
||||||
response = yield self.client.get_json(
|
response = yield self.client.get_json(
|
||||||
destination=remote_server,
|
destination=remote_server,
|
||||||
path=path,
|
path=path,
|
||||||
|
args=args,
|
||||||
|
ignore_backoff=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue(response)
|
defer.returnValue(response)
|
||||||
@@ -276,7 +356,7 @@ class TransportLayerClient(object):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def query_client_keys(self, destination, query_content):
|
def query_client_keys(self, destination, query_content, timeout):
|
||||||
"""Query the device keys for a list of user ids hosted on a remote
|
"""Query the device keys for a list of user ids hosted on a remote
|
||||||
server.
|
server.
|
||||||
|
|
||||||
@@ -305,12 +385,39 @@ class TransportLayerClient(object):
|
|||||||
destination=destination,
|
destination=destination,
|
||||||
path=path,
|
path=path,
|
||||||
data=query_content,
|
data=query_content,
|
||||||
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
defer.returnValue(content)
|
defer.returnValue(content)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def claim_client_keys(self, destination, query_content):
|
def query_user_devices(self, destination, user_id, timeout):
|
||||||
|
"""Query the devices for a user id hosted on a remote server.
|
||||||
|
|
||||||
|
Response:
|
||||||
|
{
|
||||||
|
"stream_id": "...",
|
||||||
|
"devices": [ { ... } ]
|
||||||
|
}
|
||||||
|
|
||||||
|
Args:
|
||||||
|
destination(str): The server to query.
|
||||||
|
query_content(dict): The user ids to query.
|
||||||
|
Returns:
|
||||||
|
A dict containg the device keys.
|
||||||
|
"""
|
||||||
|
path = PREFIX + "/user/devices/" + user_id
|
||||||
|
|
||||||
|
content = yield self.client.get_json(
|
||||||
|
destination=destination,
|
||||||
|
path=path,
|
||||||
|
timeout=timeout,
|
||||||
|
)
|
||||||
|
defer.returnValue(content)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
@log_function
|
||||||
|
def claim_client_keys(self, destination, query_content, timeout):
|
||||||
"""Claim one-time keys for a list of devices hosted on a remote server.
|
"""Claim one-time keys for a list of devices hosted on a remote server.
|
||||||
|
|
||||||
Request:
|
Request:
|
||||||
@@ -341,13 +448,14 @@ class TransportLayerClient(object):
|
|||||||
destination=destination,
|
destination=destination,
|
||||||
path=path,
|
path=path,
|
||||||
data=query_content,
|
data=query_content,
|
||||||
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
defer.returnValue(content)
|
defer.returnValue(content)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
@log_function
|
@log_function
|
||||||
def get_missing_events(self, destination, room_id, earliest_events,
|
def get_missing_events(self, destination, room_id, earliest_events,
|
||||||
latest_events, limit, min_depth):
|
latest_events, limit, min_depth, timeout):
|
||||||
path = PREFIX + "/get_missing_events/%s" % (room_id,)
|
path = PREFIX + "/get_missing_events/%s" % (room_id,)
|
||||||
|
|
||||||
content = yield self.client.post_json(
|
content = yield self.client.post_json(
|
||||||
@@ -358,7 +466,409 @@ class TransportLayerClient(object):
                 "min_depth": int(min_depth),
                 "earliest_events": earliest_events,
                 "latest_events": latest_events,
-            }
+            },
+            timeout=timeout,
         )

         defer.returnValue(content)
+
+    @log_function
+    def get_group_profile(self, destination, group_id, requester_user_id):
+        """Get a group profile
+        """
+        path = PREFIX + "/groups/%s/profile" % (group_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def update_group_profile(self, destination, group_id, requester_user_id, content):
+        """Update a remote group profile
+
+        Args:
+            destination (str)
+            group_id (str)
+            requester_user_id (str)
+            content (dict): The new profile of the group
+        """
+        path = PREFIX + "/groups/%s/profile" % (group_id,)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_group_summary(self, destination, group_id, requester_user_id):
+        """Get a group summary
+        """
+        path = PREFIX + "/groups/%s/summary" % (group_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_rooms_in_group(self, destination, group_id, requester_user_id):
+        """Get all rooms in a group
+        """
+        path = PREFIX + "/groups/%s/rooms" % (group_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    def add_room_to_group(self, destination, group_id, requester_user_id, room_id,
+                          content):
+        """Add a room to a group
+        """
+        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
+        """Remove a room from a group
+        """
+        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)
+
+        return self.client.delete_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_users_in_group(self, destination, group_id, requester_user_id):
+        """Get users in a group
+        """
+        path = PREFIX + "/groups/%s/users" % (group_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_invited_users_in_group(self, destination, group_id, requester_user_id):
+        """Get users that have been invited to a group
+        """
+        path = PREFIX + "/groups/%s/invited_users" % (group_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def accept_group_invite(self, destination, group_id, user_id, content):
+        """Accept a group invite
+        """
+        path = PREFIX + "/groups/%s/users/%s/accept_invite" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
+        """Invite a user to a group
+        """
+        path = PREFIX + "/groups/%s/users/%s/invite" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def invite_to_group_notification(self, destination, group_id, user_id, content):
+        """Sent by group server to inform a user's server that they have been
+        invited.
+        """
+
+        path = PREFIX + "/groups/local/%s/users/%s/invite" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def remove_user_from_group(self, destination, group_id, requester_user_id,
+                               user_id, content):
+        """Remove a user fron a group
+        """
+        path = PREFIX + "/groups/%s/users/%s/remove" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def remove_user_from_group_notification(self, destination, group_id, user_id,
+                                            content):
+        """Sent by group server to inform a user's server that they have been
+        kicked from the group.
+        """
+
+        path = PREFIX + "/groups/local/%s/users/%s/remove" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def renew_group_attestation(self, destination, group_id, user_id, content):
+        """Sent by either a group server or a user's server to periodically update
+        the attestations
+        """
+
+        path = PREFIX + "/groups/%s/renew_attestation/%s" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def update_group_summary_room(self, destination, group_id, user_id, room_id,
+                                  category_id, content):
+        """Update a room entry in a group summary
+        """
+        if category_id:
+            path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % (
+                group_id, category_id, room_id,
+            )
+        else:
+            path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def delete_group_summary_room(self, destination, group_id, user_id, room_id,
+                                  category_id):
+        """Delete a room entry in a group summary
+        """
+        if category_id:
+            path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % (
+                group_id, category_id, room_id,
+            )
+        else:
+            path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,)
+
+        return self.client.delete_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_group_categories(self, destination, group_id, requester_user_id):
+        """Get all categories in a group
+        """
+        path = PREFIX + "/groups/%s/categories" % (group_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_group_category(self, destination, group_id, requester_user_id, category_id):
+        """Get category info in a group
+        """
+        path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def update_group_category(self, destination, group_id, requester_user_id, category_id,
+                              content):
+        """Update a category in a group
+        """
+        path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def delete_group_category(self, destination, group_id, requester_user_id,
+                              category_id):
+        """Delete a category in a group
+        """
+        path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
+
+        return self.client.delete_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_group_roles(self, destination, group_id, requester_user_id):
+        """Get all roles in a group
+        """
+        path = PREFIX + "/groups/%s/roles" % (group_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def get_group_role(self, destination, group_id, requester_user_id, role_id):
+        """Get a roles info
+        """
+        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+
+        return self.client.get_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def update_group_role(self, destination, group_id, requester_user_id, role_id,
+                          content):
+        """Update a role in a group
+        """
+        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def delete_group_role(self, destination, group_id, requester_user_id, role_id):
+        """Delete a role in a group
+        """
+        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+
+        return self.client.delete_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def update_group_summary_user(self, destination, group_id, requester_user_id,
+                                  user_id, role_id, content):
+        """Update a users entry in a group
+        """
+        if role_id:
+            path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
+                group_id, role_id, user_id,
+            )
+        else:
+            path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def delete_group_summary_user(self, destination, group_id, requester_user_id,
+                                  user_id, role_id):
+        """Delete a users entry in a group
+        """
+        if role_id:
+            path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
+                group_id, role_id, user_id,
+            )
+        else:
+            path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
+
+        return self.client.delete_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
+    def bulk_get_publicised_groups(self, destination, user_ids):
+        """Get the groups a list of users are publicising
+        """
+
+        path = PREFIX + "/get_groups_publicised"
+
+        content = {"user_ids": user_ids}
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
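The group client helpers above return the Deferred from the underlying federation HTTP client directly (there is no @defer.inlineCallbacks on them), so a caller can chain callbacks onto the result. A small sketch, with a hypothetical log_group_profile helper and an assumed group id format:

def log_group_profile(transport, group_id, requester_user_id):
    # Hypothetical caller: the server that hosts the group is assumed to be
    # the domain part of the group id, e.g. "+team:example.org" -> "example.org".
    destination = group_id.split(":", 1)[1]
    d = transport.get_group_profile(destination, group_id, requester_user_id)
    d.addCallback(lambda profile: print("group profile:", profile))
    return d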
@@ -18,13 +18,19 @@ from twisted.internet import defer
 from synapse.api.urls import FEDERATION_PREFIX as PREFIX
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import JsonResource
-from synapse.http.servlet import parse_json_object_from_request, parse_string
+from synapse.http.servlet import (
+    parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
+    parse_boolean_from_args,
+)
 from synapse.util.ratelimitutils import FederationRateLimiter
+from synapse.util.versionstring import get_version_string
+from synapse.util.logcontext import preserve_fn
+from synapse.types import ThirdPartyInstanceID, get_domain_from_id

 import functools
 import logging
-import simplejson as json
 import re
+import synapse


 logger = logging.getLogger(__name__)
@@ -60,14 +66,25 @@ class TransportLayerServer(JsonResource):
         )


+class AuthenticationError(SynapseError):
+    """There was a problem authenticating the request"""
+    pass
+
+
+class NoAuthenticationError(AuthenticationError):
+    """The request had no authentication information"""
+    pass
+
+
 class Authenticator(object):
     def __init__(self, hs):
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
+        self.store = hs.get_datastore()

     # A method just so we can pass 'self' as the authenticator to the Servlets
     @defer.inlineCallbacks
-    def authenticate_request(self, request):
+    def authenticate_request(self, request, content):
         json_request = {
             "method": request.method,
             "uri": request.uri,
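A standalone sketch of why this hunk introduces two exception classes: a request with no Authorization header at all (NoAuthenticationError) can be handled differently from one whose header is present but malformed or unverifiable (AuthenticationError). The classes are re-declared locally here purely for illustration.

class AuthenticationError(Exception):
    """There was a problem authenticating the request (local stand-in)."""

class NoAuthenticationError(AuthenticationError):
    """The request had no authentication information (local stand-in)."""

def describe(error):
    # Servlets that opt out of auth can swallow only the "no auth at all"
    # case while still failing hard on malformed or invalid signatures.
    if isinstance(error, NoAuthenticationError):
        return "no authentication supplied"
    if isinstance(error, AuthenticationError):
        return "authentication attempted but failed"
    return "unrelated error"

print(describe(NoAuthenticationError()))  # no authentication supplied
print(describe(AuthenticationError()))    # authentication attempted but failed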
@@ -75,17 +92,10 @@ class Authenticator(object):
             "signatures": {},
         }

-        content = None
-        origin = None
+        if content is not None:
+            json_request["content"] = content

-        if request.method in ["PUT", "POST"]:
-            # TODO: Handle other method types? other content types?
-            try:
-                content_bytes = request.content.read()
-                content = json.loads(content_bytes)
-                json_request["content"] = content
-            except:
-                raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)
+        origin = None

         def parse_auth_header(header_str):
             try:
@@ -102,15 +112,15 @@ class Authenticator(object):
                 key = strip_quotes(param_dict["key"])
                 sig = strip_quotes(param_dict["sig"])
                 return (origin, key, sig)
-            except:
-                raise SynapseError(
+            except Exception:
+                raise AuthenticationError(
                     400, "Malformed Authorization header", Codes.UNAUTHORIZED
                 )

         auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

         if not auth_headers:
-            raise SynapseError(
+            raise NoAuthenticationError(
                 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
             )

@@ -121,7 +131,7 @@ class Authenticator(object):
             json_request["signatures"].setdefault(origin, {})[key] = sig

         if not json_request["signatures"]:
-            raise SynapseError(
+            raise NoAuthenticationError(
                 401, "Missing Authorization headers", Codes.UNAUTHORIZED,
             )

@@ -130,40 +140,64 @@ class Authenticator(object):
         logger.info("Request from %s", origin)
         request.authenticated_entity = origin

-        defer.returnValue((origin, content))
+        # If we get a valid signed request from the other side, its probably
+        # alive
+        retry_timings = yield self.store.get_destination_retry_timings(origin)
+        if retry_timings and retry_timings["retry_last_ts"]:
+            logger.info("Marking origin %r as up", origin)
+            preserve_fn(self.store.set_destination_retry_timings)(origin, 0, 0)
+
+        defer.returnValue(origin)


 class BaseFederationServlet(object):
-    def __init__(self, handler, authenticator, ratelimiter, server_name,
-                 room_list_handler):
+    REQUIRE_AUTH = True
+
+    def __init__(self, handler, authenticator, ratelimiter, server_name):
         self.handler = handler
         self.authenticator = authenticator
         self.ratelimiter = ratelimiter
-        self.room_list_handler = room_list_handler

-    def _wrap(self, code):
+    def _wrap(self, func):
         authenticator = self.authenticator
         ratelimiter = self.ratelimiter

         @defer.inlineCallbacks
-        @functools.wraps(code)
-        def new_code(request, *args, **kwargs):
+        @functools.wraps(func)
+        def new_func(request, *args, **kwargs):
+            content = None
+            if request.method in ["PUT", "POST"]:
+                # TODO: Handle other method types? other content types?
+                content = parse_json_object_from_request(request)
+
             try:
-                (origin, content) = yield authenticator.authenticate_request(request)
-                with ratelimiter.ratelimit(origin) as d:
-                    yield d
-                    response = yield code(
-                        origin, content, request.args, *args, **kwargs
-                    )
-            except:
+                origin = yield authenticator.authenticate_request(request, content)
+            except NoAuthenticationError:
+                origin = None
+                if self.REQUIRE_AUTH:
+                    logger.exception("authenticate_request failed")
+                    raise
+            except Exception:
                 logger.exception("authenticate_request failed")
                 raise
+
+            if origin:
+                with ratelimiter.ratelimit(origin) as d:
+                    yield d
+                    response = yield func(
+                        origin, content, request.args, *args, **kwargs
+                    )
+            else:
+                response = yield func(
+                    origin, content, request.args, *args, **kwargs
+                )

             defer.returnValue(response)

         # Extra logic that functools.wraps() doesn't finish
-        new_code.__self__ = code.__self__
+        new_func.__self__ = func.__self__

-        return new_code
+        return new_func

     def register(self, server):
         pattern = re.compile("^" + PREFIX + self.PATH + "$")
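A simplified, synchronous sketch of the control flow that the rewritten _wrap implements: the JSON body is parsed once up front, authentication failures are only fatal when the servlet keeps REQUIRE_AUTH = True, and the handler is otherwise invoked with origin set to None. The authenticate and dispatch callables and the request dict shape are hypothetical stand-ins for the Twisted code above.

class NoAuthenticationError(Exception):
    pass

def handle(request, require_auth, authenticate, dispatch):
    # request is assumed to be a dict such as
    # {"method": "POST", "json_body": {...}, "args": {...}}.
    content = request.get("json_body") if request["method"] in ("PUT", "POST") else None
    try:
        origin = authenticate(request, content)
    except NoAuthenticationError:
        if require_auth:
            raise          # servlets with REQUIRE_AUTH = True still reject
        origin = None      # opt-out servlets (e.g. /version) proceed anonymously
    return dispatch(origin, content, request.get("args", {}))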
@@ -236,7 +270,7 @@ class FederationSendServlet(BaseFederationServlet):
             code, response = yield self.handler.on_incoming_transaction(
                 transaction_data
             )
-        except:
+        except Exception:
             logger.exception("on_incoming_transaction failed")
             raise

@@ -271,6 +305,17 @@ class FederationStateServlet(BaseFederationServlet):
         )


+class FederationStateIdsServlet(BaseFederationServlet):
+    PATH = "/state_ids/(?P<room_id>[^/]*)/"
+
+    def on_GET(self, origin, content, query, room_id):
+        return self.handler.on_state_ids_request(
+            origin,
+            room_id,
+            query.get("event_id", [None])[0],
+        )
+
+
 class FederationBackfillServlet(BaseFederationServlet):
     PATH = "/backfill/(?P<context>[^/]*)/"

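For reference, the PATH attributes are regular-expression fragments that register() anchors between the federation URL prefix and "$". A short sketch of how the new /state_ids pattern captures room_id; the prefix value shown is illustrative, not quoted from the diff.

import re

PREFIX = "/_matrix/federation/v1"  # illustrative prefix value
PATH = "/state_ids/(?P<room_id>[^/]*)/"

pattern = re.compile("^" + PREFIX + PATH + "$")
match = pattern.match(PREFIX + "/state_ids/!abc:example.org/")
print(match.group("room_id"))  # !abc:example.org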
@@ -367,10 +412,15 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
 class FederationClientKeysQueryServlet(BaseFederationServlet):
     PATH = "/user/keys/query"

-    @defer.inlineCallbacks
     def on_POST(self, origin, content, query):
-        response = yield self.handler.on_query_client_keys(origin, content)
-        defer.returnValue((200, response))
+        return self.handler.on_query_client_keys(origin, content)
+
+
+class FederationUserDevicesQueryServlet(BaseFederationServlet):
+    PATH = "/user/devices/(?P<user_id>[^/]*)"
+
+    def on_GET(self, origin, content, query, user_id):
+        return self.handler.on_query_user_devices(origin, user_id)


 class FederationClientKeysClaimServlet(BaseFederationServlet):
@@ -420,9 +470,10 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
 class On3pidBindServlet(BaseFederationServlet):
     PATH = "/3pid/onbind"

+    REQUIRE_AUTH = False
+
     @defer.inlineCallbacks
-    def on_POST(self, request):
-        content = parse_json_object_from_request(request)
+    def on_POST(self, origin, content, query):
         if "invites" in content:
             last_exception = None
             for invite in content["invites"]:
@@ -444,11 +495,6 @@ class On3pidBindServlet(BaseFederationServlet):
                 raise last_exception
         defer.returnValue((200, {}))

-    # Avoid doing remote HS authorization checks which are done by default by
-    # BaseFederationServlet.
-    def _wrap(self, code):
-        return code
-

 class OpenIdUserInfo(BaseFederationServlet):
     """
@@ -469,9 +515,11 @@ class OpenIdUserInfo(BaseFederationServlet):

     PATH = "/openid/userinfo"

+    REQUIRE_AUTH = False
+
     @defer.inlineCallbacks
-    def on_GET(self, request):
-        token = parse_string(request, "access_token")
+    def on_GET(self, origin, content, query):
+        token = query.get("access_token", [None])[0]
         if token is None:
             defer.returnValue((401, {
                 "errcode": "M_MISSING_TOKEN", "error": "Access Token required"
@@ -488,11 +536,6 @@ class OpenIdUserInfo(BaseFederationServlet):

         defer.returnValue((200, {"sub": user_id}))

-    # Avoid doing remote HS authorization checks which are done by default by
-    # BaseFederationServlet.
-    def _wrap(self, code):
-        return code
-

 class PublicRoomList(BaseFederationServlet):
     """
@@ -529,15 +572,536 @@ class PublicRoomList(BaseFederationServlet):

     @defer.inlineCallbacks
     def on_GET(self, origin, content, query):
-        data = yield self.room_list_handler.get_local_public_room_list()
+        limit = parse_integer_from_args(query, "limit", 0)
+        since_token = parse_string_from_args(query, "since", None)
+        include_all_networks = parse_boolean_from_args(
+            query, "include_all_networks", False
+        )
+        third_party_instance_id = parse_string_from_args(
+            query, "third_party_instance_id", None
+        )
+
+        if include_all_networks:
+            network_tuple = None
+        elif third_party_instance_id:
+            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
+        else:
+            network_tuple = ThirdPartyInstanceID(None, None)
+
+        data = yield self.handler.get_local_public_room_list(
+            limit, since_token,
+            network_tuple=network_tuple
+        )
         defer.returnValue((200, data))


-SERVLET_CLASSES = (
+class FederationVersionServlet(BaseFederationServlet):
+    PATH = "/version"
+
+    REQUIRE_AUTH = False
+
+    def on_GET(self, origin, content, query):
+        return defer.succeed((200, {
+            "server": {
+                "name": "Synapse",
+                "version": get_version_string(synapse)
+            },
+        }))
+
+
+class FederationGroupsProfileServlet(BaseFederationServlet):
+    """Get/set the basic profile of a group on behalf of a user
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/profile$"
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_group_profile(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.update_group_profile(
+            group_id, requester_user_id, content
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsSummaryServlet(BaseFederationServlet):
+    PATH = "/groups/(?P<group_id>[^/]*)/summary$"
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_group_summary(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsRoomsServlet(BaseFederationServlet):
+    """Get the rooms in a group on behalf of a user
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/rooms$"
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_rooms_in_group(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsAddRoomsServlet(BaseFederationServlet):
+    """Add/remove room from group
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/room/(?<room_id>)$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, room_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.add_room_to_group(
+            group_id, requester_user_id, room_id, content
+        )
+
+        defer.returnValue((200, new_content))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, origin, content, query, group_id, room_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.remove_room_from_group(
+            group_id, requester_user_id, room_id,
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsUsersServlet(BaseFederationServlet):
+    """Get the users in a group on behalf of a user
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/users$"
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_users_in_group(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
+    """Get the users that have been invited to a group
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/invited_users$"
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_invited_users_in_group(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsInviteServlet(BaseFederationServlet):
+    """Ask a group server to invite someone to the group
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, user_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.invite_to_group(
+            group_id, user_id, requester_user_id, content,
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
+    """Accept an invitation from the group server
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, user_id):
+        if get_domain_from_id(user_id) != origin:
+            raise SynapseError(403, "user_id doesn't match origin")
+
+        new_content = yield self.handler.accept_invite(
+            group_id, user_id, content,
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsRemoveUserServlet(BaseFederationServlet):
+    """Leave or kick a user from the group
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, user_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.remove_user_from_group(
+            group_id, user_id, requester_user_id, content,
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsLocalInviteServlet(BaseFederationServlet):
+    """A group server has invited a local user
+    """
+    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, user_id):
+        if get_domain_from_id(group_id) != origin:
+            raise SynapseError(403, "group_id doesn't match origin")
+
+        new_content = yield self.handler.on_invite(
+            group_id, user_id, content,
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
+    """A group server has removed a local user
+    """
+    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, user_id):
+        if get_domain_from_id(group_id) != origin:
+            raise SynapseError(403, "user_id doesn't match origin")
+
+        new_content = yield self.handler.user_removed_from_group(
+            group_id, user_id, content,
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
+    """A group or user's server renews their attestation
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, user_id):
+        # We don't need to check auth here as we check the attestation signatures
+
+        new_content = yield self.handler.on_renew_attestation(
+            group_id, user_id, content
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):
+    """Add/remove a room from the group summary, with optional category.
+
+    Matches both:
+        - /groups/:group/summary/rooms/:room_id
+        - /groups/:group/summary/categories/:category/rooms/:room_id
+    """
+    PATH = (
+        "/groups/(?P<group_id>[^/]*)/summary"
+        "(/categories/(?P<category_id>[^/]+))?"
+        "/rooms/(?P<room_id>[^/]*)$"
+    )
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, category_id, room_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if category_id == "":
+            raise SynapseError(400, "category_id cannot be empty string")
+
+        resp = yield self.handler.update_group_summary_room(
+            group_id, requester_user_id,
+            room_id=room_id,
+            category_id=category_id,
+            content=content,
+        )
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, origin, content, query, group_id, category_id, room_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if category_id == "":
+            raise SynapseError(400, "category_id cannot be empty string")
+
+        resp = yield self.handler.delete_group_summary_room(
+            group_id, requester_user_id,
+            room_id=room_id,
+            category_id=category_id,
+        )
+
+        defer.returnValue((200, resp))
+
+
+class FederationGroupsCategoriesServlet(BaseFederationServlet):
+    """Get all categories for a group
+    """
+    PATH = (
+        "/groups/(?P<group_id>[^/]*)/categories/$"
+    )
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        resp = yield self.handler.get_group_categories(
+            group_id, requester_user_id,
+        )
+
+        defer.returnValue((200, resp))
+
+
+class FederationGroupsCategoryServlet(BaseFederationServlet):
+    """Add/remove/get a category in a group
+    """
+    PATH = (
+        "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)$"
+    )
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id, category_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        resp = yield self.handler.get_group_category(
+            group_id, requester_user_id, category_id
+        )
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, category_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if category_id == "":
+            raise SynapseError(400, "category_id cannot be empty string")
+
+        resp = yield self.handler.upsert_group_category(
+            group_id, requester_user_id, category_id, content,
+        )
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, origin, content, query, group_id, category_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if category_id == "":
+            raise SynapseError(400, "category_id cannot be empty string")
+
+        resp = yield self.handler.delete_group_category(
+            group_id, requester_user_id, category_id,
+        )
+
+        defer.returnValue((200, resp))
+
+
+class FederationGroupsRolesServlet(BaseFederationServlet):
+    """Get roles in a group
+    """
+    PATH = (
+        "/groups/(?P<group_id>[^/]*)/roles/$"
+    )
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        resp = yield self.handler.get_group_roles(
+            group_id, requester_user_id,
+        )
+
+        defer.returnValue((200, resp))
+
+
+class FederationGroupsRoleServlet(BaseFederationServlet):
+    """Add/remove/get a role in a group
+    """
+    PATH = (
+        "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$"
+    )
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, group_id, role_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        resp = yield self.handler.get_group_role(
+            group_id, requester_user_id, role_id
+        )
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, role_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if role_id == "":
+            raise SynapseError(400, "role_id cannot be empty string")
+
+        resp = yield self.handler.update_group_role(
+            group_id, requester_user_id, role_id, content,
+        )
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, origin, content, query, group_id, role_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if role_id == "":
+            raise SynapseError(400, "role_id cannot be empty string")
+
+        resp = yield self.handler.delete_group_role(
+            group_id, requester_user_id, role_id,
+        )
+
+        defer.returnValue((200, resp))
+
+
+class FederationGroupsSummaryUsersServlet(BaseFederationServlet):
+    """Add/remove a user from the group summary, with optional role.
+
+    Matches both:
+        - /groups/:group/summary/users/:user_id
+        - /groups/:group/summary/roles/:role/users/:user_id
+    """
+    PATH = (
+        "/groups/(?P<group_id>[^/]*)/summary"
+        "(/roles/(?P<role_id>[^/]+))?"
+        "/users/(?P<user_id>[^/]*)$"
+    )
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, role_id, user_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if role_id == "":
+            raise SynapseError(400, "role_id cannot be empty string")
+
+        resp = yield self.handler.update_group_summary_user(
+            group_id, requester_user_id,
+            user_id=user_id,
+            role_id=role_id,
+            content=content,
+        )
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, origin, content, query, group_id, role_id, user_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        if role_id == "":
+            raise SynapseError(400, "role_id cannot be empty string")
+
+        resp = yield self.handler.delete_group_summary_user(
+            group_id, requester_user_id,
+            user_id=user_id,
+            role_id=role_id,
+        )
+
+        defer.returnValue((200, resp))
+
+
+class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
+    """Get roles in a group
+    """
+    PATH = (
+        "/get_groups_publicised$"
+    )
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query):
+        resp = yield self.handler.bulk_get_publicised_groups(
+            content["user_ids"], proxy=False,
+        )
+
+        defer.returnValue((200, resp))
+
+
+FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
     FederationPullServlet,
     FederationEventServlet,
     FederationStateServlet,
+    FederationStateIdsServlet,
     FederationBackfillServlet,
     FederationQueryServlet,
     FederationMakeJoinServlet,
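The summary servlets above use a single PATH with an optional categories/roles segment so one class serves both URL shapes. A small sketch of the matching, with illustrative group, category and room identifiers:

import re

PATH = (
    "/groups/(?P<group_id>[^/]*)/summary"
    "(/categories/(?P<category_id>[^/]+))?"
    "/rooms/(?P<room_id>[^/]*)$"
)
pattern = re.compile("^" + PATH)

for url in (
    "/groups/+team:example.org/summary/rooms/!room:example.org",
    "/groups/+team:example.org/summary/categories/music/rooms/!room:example.org",
):
    m = pattern.match(url)
    print(m.group("group_id"), m.group("category_id"), m.group("room_id"))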
@@ -550,20 +1114,86 @@ SERVLET_CLASSES = (
     FederationGetMissingEventsServlet,
     FederationEventAuthServlet,
     FederationClientKeysQueryServlet,
+    FederationUserDevicesQueryServlet,
     FederationClientKeysClaimServlet,
     FederationThirdPartyInviteExchangeServlet,
     On3pidBindServlet,
     OpenIdUserInfo,
+    FederationVersionServlet,
+)
+
+
+ROOM_LIST_CLASSES = (
     PublicRoomList,
 )
+
+
+GROUP_SERVER_SERVLET_CLASSES = (
+    FederationGroupsProfileServlet,
+    FederationGroupsSummaryServlet,
+    FederationGroupsRoomsServlet,
+    FederationGroupsUsersServlet,
+    FederationGroupsInvitedUsersServlet,
+    FederationGroupsInviteServlet,
+    FederationGroupsAcceptInviteServlet,
+    FederationGroupsRemoveUserServlet,
+    FederationGroupsSummaryRoomsServlet,
+    FederationGroupsCategoriesServlet,
+    FederationGroupsCategoryServlet,
+    FederationGroupsRolesServlet,
+    FederationGroupsRoleServlet,
+    FederationGroupsSummaryUsersServlet,
+)
+
+
+GROUP_LOCAL_SERVLET_CLASSES = (
+    FederationGroupsLocalInviteServlet,
+    FederationGroupsRemoveLocalUserServlet,
+    FederationGroupsBulkPublicisedServlet,
+)
+
+
+GROUP_ATTESTATION_SERVLET_CLASSES = (
+    FederationGroupsRenewAttestaionServlet,
+)


 def register_servlets(hs, resource, authenticator, ratelimiter):
-    for servletclass in SERVLET_CLASSES:
+    for servletclass in FEDERATION_SERVLET_CLASSES:
         servletclass(
             handler=hs.get_replication_layer(),
             authenticator=authenticator,
             ratelimiter=ratelimiter,
             server_name=hs.hostname,
-            room_list_handler=hs.get_room_list_handler(),
+        ).register(resource)
+
+    for servletclass in ROOM_LIST_CLASSES:
+        servletclass(
+            handler=hs.get_room_list_handler(),
+            authenticator=authenticator,
+            ratelimiter=ratelimiter,
+            server_name=hs.hostname,
+        ).register(resource)
+
+    for servletclass in GROUP_SERVER_SERVLET_CLASSES:
+        servletclass(
+            handler=hs.get_groups_server_handler(),
+            authenticator=authenticator,
+            ratelimiter=ratelimiter,
+            server_name=hs.hostname,
+        ).register(resource)
+
+    for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
+        servletclass(
+            handler=hs.get_groups_local_handler(),
+            authenticator=authenticator,
+            ratelimiter=ratelimiter,
+            server_name=hs.hostname,
+        ).register(resource)
+
+    for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
+        servletclass(
+            handler=hs.get_groups_attestation_renewer(),
+            authenticator=authenticator,
+            ratelimiter=ratelimiter,
+            server_name=hs.hostname,
         ).register(resource)
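The register_servlets change above repeats the same construction loop once per servlet family, each bound to a different handler. A compact sketch of that pattern, assuming the servlet tuples defined above are in scope and using a hypothetical handlers mapping:

def register_all(resource, authenticator, ratelimiter, server_name, handlers):
    # handlers is a hypothetical dict mapping family name -> handler object.
    families = (
        (FEDERATION_SERVLET_CLASSES, handlers["federation"]),
        (ROOM_LIST_CLASSES, handlers["room_list"]),
        (GROUP_SERVER_SERVLET_CLASSES, handlers["groups_server"]),
        (GROUP_LOCAL_SERVLET_CLASSES, handlers["groups_local"]),
        (GROUP_ATTESTATION_SERVLET_CLASSES, handlers["groups_attestation"]),
    )
    for servlet_classes, handler in families:
        for servletclass in servlet_classes:
            servletclass(
                handler=handler,
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=server_name,
            ).register(resource)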
synapse/groups/__init__.py (new file, 0 lines changed)
Some files were not shown because too many files have changed in this diff.