Mirror of https://github.com/element-hq/synapse.git (synced 2025-12-05 01:10:13 +00:00)

Compare commits: 968 commits
.gitignore (vendored, 18 lines changed)

@@ -26,17 +26,19 @@ htmlcov
demo/*.db
demo/*.log
demo/*.log.*
demo/*.pid
demo/media_store.*
demo/etc

graph/*.svg
graph/*.png
graph/*.dot

**/webclient/config.js
**/webclient/test/coverage/
**/webclient/test/environment-protractor.js

uploads

.idea/
media_store/

*.tac

build/

localhost-800*/
static/client/register/register_config.js
CHANGES.rst (146 lines changed)

@@ -1,54 +1,144 @@
Changes in synapse v0.8.1 (2015-03-18)
======================================

* Disable registration by default. New users can be added using the command
  ``register_new_matrix_user`` or by enabling registration in the config.
* Add metrics to synapse. To enable metrics use config options
  ``enable_metrics`` and ``metrics_port``.
* Fix bug where banning only kicked the user.
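A minimal sketch of how those two options might look in ``homeserver.yaml``
(the port value here is an arbitrary example, not taken from this changelog)::

    enable_metrics: true
    metrics_port: 9090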
Changes in synapse v0.8.0 (2015-03-06)
======================================

General:

* Add support for registration fallback. This is a page hosted on the server
  which allows a user to register for an account, regardless of what client
  they are using (e.g. mobile devices).

* Added new default push rules and made them configurable by clients:

  * Suppress all notice messages.
  * Notify when invited to a new room.
  * Notify for messages that don't match any rule.
  * Notify on incoming call.

Federation:

* Added per host server side rate-limiting of incoming federation requests.
* Added a ``/get_missing_events/`` API to federation to reduce number of
  ``/events/`` requests.

Configuration:

* Added configuration option to disable registration:
  ``disable_registration``.
* Added configuration option to change soft limit of number of open file
  descriptors: ``soft_file_limit``.
* Make ``tls_private_key_path`` optional when running with ``no_tls``.

Application services:

* Application services can now poll on the CS API ``/events`` for their events,
  by providing their application service ``access_token``.
* Added exclusive namespace support to application services API.


Changes in synapse v0.7.1 (2015-02-19)
======================================

* Initial alpha implementation of parts of the Application Services API.
  Including:

  - AS Registration / Unregistration
  - User Query API
  - Room Alias Query API
  - Push transport for receiving events.
  - User/Alias namespace admin control

* Add cache when fetching events from remote servers to stop repeatedly
  fetching events with bad signatures.
* Respect the per remote server retry scheme when fetching both events and
  server keys to reduce the number of times we send requests to dead servers.
* Inform remote servers when the local server fails to handle a received event.
* Turn off python bytecode generation due to problems experienced when
  upgrading from previous versions.


Changes in synapse v0.7.0 (2015-02-12)
======================================

* Add initial implementation of the query auth federation API, allowing
  servers to agree on whether an event should be allowed or rejected.
* Persist events we have rejected from federation, fixing the bug where
  servers would keep requesting the same events.
* Various federation performance improvements, including:

  - Add in memory caches on queries such as:

    * Computing the state of a room at a point in time, used for
      authorization on federation requests.
    * Fetching events from the database.
    * User's room membership, used for authorizing presence updates.

  - Upgraded JSON library to improve parsing and serialisation speeds.

* Add default avatars to new user accounts using pydenticon library.
* Correctly time out federation requests.
* Retry federation requests against different servers.
* Add support for push and push rules.
* Add alpha versions of proposed new CSv2 APIs, including ``/sync`` API.

Changes in synapse 0.6.1 (2015-01-07)
=====================================

* Major optimizations to improve performance of initial sync and event sending
  in large rooms (by up to 10x)
* Media repository now includes a Content-Length header on media downloads.
* Improve quality of thumbnails by changing resizing algorithm.

Changes in synapse 0.6.0 (2014-12-16)
=====================================

* Add new API for media upload and download that supports thumbnailing.
* Replicate media uploads over multiple homeservers so media is always served
  to clients from their local homeserver. This obsoletes the
  --content-addr parameter and confusion over accessing content directly
  from remote homeservers.
* Implement exponential backoff when retrying federation requests when
  sending to remote homeservers which are offline.
* Implement typing notifications.
* Fix bugs where we sent events with invalid signatures due to bugs where
  we incorrectly persisted events.
* Improve performance of database queries involving retrieving events.

Changes in synapse 0.5.4a (2014-12-13)
======================================

* Fix bug while generating the error message when a file path specified in
  the config doesn't exist.

Changes in synapse 0.5.4 (2014-12-03)
=====================================

* Fix presence bug where some rooms did not display presence updates for
  remote users.
* Do not log SQL timing log lines when started with "-v"
* Fix potential memory leak.

Changes in synapse 0.5.3c (2014-12-02)
======================================

* Change the default value for the `content_addr` option to use the HTTP
  listener, as by default the HTTPS listener will be using a self-signed
  certificate.

Changes in synapse 0.5.3 (2014-11-27)
=====================================

* Fix bug that caused joining a remote room to fail if a single event was not
  signed correctly.
* Fix bug which caused servers to continuously try and fetch events from other
  servers.

Changes in synapse 0.5.2 (2014-11-26)
=====================================
MANIFEST.in (16 lines changed)

@@ -1,4 +1,14 @@
recursive-include docs *
recursive-include tests *.py
include synctl
include LICENSE
include VERSION
include *.rst
include demo/README

recursive-include synapse/storage/schema *.sql
recursive-include syweb/webclient *

recursive-include demo *.dh
recursive-include demo *.py
recursive-include demo *.sh
recursive-include docs *
recursive-include scripts *
recursive-include tests *.py
README.rst (128 lines changed)

@@ -1,3 +1,5 @@
.. contents::

Introduction
============

@@ -6,7 +8,7 @@ VoIP. The basics you need to know to get up and running are:

- Everything in Matrix happens in a room. Rooms are distributed and do not
  exist on any single server. Rooms can be located using convenience aliases
  like ``#matrix:matrix.org`` or ``#test:localhost:8008``.
  like ``#matrix:matrix.org`` or ``#test:localhost:8448``.

- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
  you will normally refer to yourself and others using a 3PID: email

@@ -95,18 +97,47 @@ Installing prerequisites on Ubuntu or Debian::

    $ sudo apt-get install build-essential python2.7-dev libffi-dev \
                           python-pip python-setuptools sqlite3 \
                           libssl-dev
                           libssl-dev python-virtualenv libjpeg-dev

Installing prerequisites on ArchLinux::

    $ sudo pacman -S base-devel python2 python-pip \
                     python-setuptools python-virtualenv sqlite3

Installing prerequisites on Mac OS X::

    $ xcode-select --install
    $ sudo pip install virtualenv

To install the synapse homeserver run::

    $ pip install --user --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
    $ virtualenv ~/.synapse
    $ source ~/.synapse/bin/activate
    $ pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master

This installs synapse, along with the libraries it uses, into
``$HOME/.local/lib/`` on Linux or ``$HOME/Library/Python/2.7/lib/`` on OSX.
This installs synapse, along with the libraries it uses, into a virtual
environment under ``~/.synapse``.

To set up your homeserver, run (in your virtualenv, as before)::

    $ cd ~/.synapse
    $ python -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --config-path homeserver.yaml \
        --generate-config

Substituting your host and domain name as appropriate.

By default, registration of new users is disabled. You can either enable
registration in the config (it is then recommended to also set up CAPTCHA), or
you can use the command line to register new users::

    $ source ~/.synapse/bin/activate
    $ register_new_matrix_user -c homeserver.yaml https://localhost:8448
    New user localpart: erikj
    Password:
    Confirm password:
    Success!

For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See docs/turn-howto.rst for details.

@@ -119,23 +150,57 @@ you get errors about ``error: no such option: --process-dependency-links`` you
may need to manually upgrade it::

    $ sudo pip install --upgrade pip

If pip crashes mid-installation for any reason (e.g. lost terminal), pip may
refuse to run until you remove the temporary installation directory it
created. To reset the installation::

    $ rm -rf /tmp/pip_install_matrix

pip seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.::

    $ pip install --user twisted
    $ pip install twisted

On OSX, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
will need to export CFLAGS=-Qunused-arguments.

ArchLinux
---------

Installation on ArchLinux may encounter a few hiccups as Arch defaults to
python 3, but synapse currently assumes python 2.7 by default.

pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1)::

    $ sudo pip2.7 install --upgrade pip

You also may need to explicitly specify python 2.7 again during the install
request::

    $ pip2.7 install --process-dependency-links \
        https://github.com/matrix-org/synapse/tarball/master

If you encounter an error with lib bcrypt causing a Wrong ELF Class:
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv)::

    $ sudo pip2.7 uninstall py-bcrypt
    $ sudo pip2.7 install py-bcrypt

During setup of homeserver you need to call python2.7 directly again::

    $ cd ~/.synapse
    $ python2.7 -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --config-path homeserver.yaml \
        --generate-config

...substituting your host and domain name as appropriate.

Windows Install
---------------
Synapse can be installed on Cygwin. It requires the following Cygwin packages:

@@ -146,7 +211,7 @@ Synapse can be installed on Cygwin. It requires the following Cygwin packages:

- openssl (and openssl-devel, python-openssl)
- python
- python-setuptools

The content repository requires additional packages and will be unable to process
uploads without them:
- libjpeg8

@@ -173,23 +238,13 @@ Running Your Homeserver

To actually run your new homeserver, pick a working directory for Synapse to run
(e.g. ``~/.synapse``), and::

    $ mkdir ~/.synapse
    $ cd ~/.synapse

    $ # on Linux
    $ ~/.local/bin/synctl start

    $ # on OSX
    $ ~/Library/Python/2.7/bin/synctl start
    $ source ./bin/activate
    $ synctl start

Troubleshooting Running
-----------------------

If ``synctl`` fails with ``pkg_resources.DistributionNotFound`` errors you may
need a newer version of setuptools than that provided by your OS::

    $ sudo pip install setuptools --upgrade

If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
encryption and digital signatures.

@@ -205,6 +260,15 @@ fix try re-installing from PyPI or directly from

    $ # Install from github
    $ pip install --user https://github.com/pyca/pynacl/tarball/master

ArchLinux
---------

If running `$ synctl start` fails with 'returned non-zero exit status 1',
you will need to explicitly call Python2.7 - either running as::

    $ python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml --pid-file homeserver.pid

...or by editing synctl with the correct python executable.

Homeserver Development
======================

@@ -216,13 +280,15 @@ directory of your choice::

    $ cd synapse

The homeserver has a number of external dependencies, that are easiest
to install by making setup.py do so, in --user mode::
to install using pip and a virtualenv::

    $ python setup.py develop --user
    $ virtualenv env
    $ source env/bin/activate
    $ python synapse/python_dependencies.py | xargs -n1 pip install
    $ pip install setuptools_trial mock

This will run a process of downloading and installing into your
user's .local/lib directory all of the required dependencies that are
missing.
This will run a process of downloading and installing all the needed
dependencies into a virtual env.

Once this is done, you may wish to run the homeserver's unit tests, to
check that everything is installed as it should be::

@@ -243,7 +309,7 @@ IMPORTANT: Before upgrading an existing homeserver to a new version, please
refer to UPGRADE.rst for any additional instructions.

Otherwise, simply re-install the new codebase over the current one - e.g.
by ``pip install --user --process-dependency-links
by ``pip install --process-dependency-links
https://github.com/matrix-org/synapse/tarball/master``
if using pip, or by ``git pull`` if running off a git working copy.

@@ -270,9 +336,9 @@ For the first form, simply pass the required hostname (of the machine) as the

    $ python -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --config-path homeserver.config \
        --config-path homeserver.yaml \
        --generate-config
    $ python -m synapse.app.homeserver --config-path homeserver.config
    $ python -m synapse.app.homeserver --config-path homeserver.yaml

Alternatively, you can run ``synctl start`` to guide you through the process.
@@ -292,9 +358,9 @@ SRV record, as that is the name other machines will expect it to have::

    $ python -m synapse.app.homeserver \
        --server-name YOURDOMAIN \
        --bind-port 8448 \
        --config-path homeserver.config \
        --config-path homeserver.yaml \
        --generate-config
    $ python -m synapse.app.homeserver --config-path homeserver.config
    $ python -m synapse.app.homeserver --config-path homeserver.yaml
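For context, the SRV record this second form relies on would conventionally
look something like the following (the domain, hostname, and TTL below are
illustrative placeholders, not values taken from this diff)::

    _matrix._tcp.YOURDOMAIN. 3600 IN SRV 10 0 8448 machine.my.domain.name.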

You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.
UPGRADE.rst (33 lines changed)

@@ -1,3 +1,32 @@
Upgrading to v0.8.0
===================

Servers which use captchas will need to add their public key to::

    static/client/register/register_config.js

        window.matrixRegistrationConfig = {
            recaptcha_public_key: "YOUR_PUBLIC_KEY"
        };

This is required in order to support registration fallback (typically used on
mobile devices).


Upgrading to v0.7.0
===================

New dependencies are:

- pydenticon
- simplejson
- syutil
- matrix-angular-sdk

To pull in these dependencies in a virtual env, run::

    python synapse/python_dependencies.py | xargs -n 1 pip install

Upgrading to v0.6.0
===================

@@ -52,7 +81,7 @@ resulting conflicts during the upgrade process.

Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.:

    ./database-prepare-for-0.5.0.sh "homeserver.db"
    ./scripts/database-prepare-for-0.5.0.sh "homeserver.db"

Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to

@@ -147,7 +176,7 @@ rooms the home server was a member of and room alias mappings.

Before running the command the homeserver should be first completely
shutdown. To run it, simply specify the location of the database, e.g.:

    ./database-prepare-for-0.0.1.sh "homeserver.db"
    ./scripts/database-prepare-for-0.0.1.sh "homeserver.db"

Once this has successfully completed it will be safe to restart the
homeserver. You may notice that the homeserver takes a few seconds longer to
@@ -21,16 +21,30 @@ import datetime
 import argparse
 
 from synapse.events import FrozenEvent
+from synapse.util.frozenutils import unfreeze
 
 
-def make_graph(db_name, room_id, file_prefix):
+def make_graph(db_name, room_id, file_prefix, limit):
     conn = sqlite3.connect(db_name)
 
-    c = conn.execute(
-        "SELECT json FROM event_json where room_id = ?",
-        (room_id,)
+    sql = (
+        "SELECT json FROM event_json as j "
+        "INNER JOIN events as e ON e.event_id = j.event_id "
+        "WHERE j.room_id = ?"
     )
 
+    args = [room_id]
+
+    if limit:
+        sql += (
+            " ORDER BY topological_ordering DESC, stream_ordering DESC "
+            "LIMIT ?"
+        )
+
+        args.append(limit)
+
+    c = conn.execute(sql, args)
+
     events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
 
     events.sort(key=lambda e: e.depth)

@@ -57,7 +71,7 @@ def make_graph(db_name, room_id, file_prefix):
             float(event.origin_server_ts) / 1000
         ).strftime('%Y-%m-%d %H:%M:%S,%f')
 
-        content = json.dumps(event.get_dict()["content"])
+        content = json.dumps(unfreeze(event.get_dict()["content"]))
 
         label = (
             "<"

@@ -128,11 +142,16 @@ if __name__ == "__main__":
     )
     parser.add_argument(
         "-p", "--prefix", dest="prefix",
-        help="String to prefix output files with"
+        help="String to prefix output files with",
+        default="graph_output"
+    )
+    parser.add_argument(
+        "-l", "--limit",
+        help="Only retrieve the last N events.",
     )
     parser.add_argument('db')
     parser.add_argument('room')
 
     args = parser.parse_args()
 
-    make_graph(args.db, args.room, args.prefix)
+    make_graph(args.db, args.room, args.prefix, args.limit)
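A hypothetical invocation of the updated script, exercising the new
``--limit`` option added above (the script filename and room ID here are
illustrative, not taken from this diff)::

    $ python graph.py -p graph_output -l 100 homeserver.db '!someroom:example.com'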
@@ -39,43 +39,43 @@ ROOMDOMAIN="meet.jit.si"
|
||||
#ROOMDOMAIN="conference.jitsi.vuc.me"
|
||||
|
||||
class TrivialMatrixClient:
|
||||
def __init__(self, access_token):
|
||||
self.token = None
|
||||
self.access_token = access_token
|
||||
def __init__(self, access_token):
|
||||
self.token = None
|
||||
self.access_token = access_token
|
||||
|
||||
def getEvent(self):
|
||||
while True:
|
||||
url = MATRIXBASE+'events?access_token='+self.access_token+"&timeout=60000"
|
||||
if self.token:
|
||||
url += "&from="+self.token
|
||||
req = grequests.get(url)
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "incoming from matrix",obj
|
||||
if 'end' not in obj:
|
||||
continue
|
||||
self.token = obj['end']
|
||||
if len(obj['chunk']):
|
||||
return obj['chunk'][0]
|
||||
def getEvent(self):
|
||||
while True:
|
||||
url = MATRIXBASE+'events?access_token='+self.access_token+"&timeout=60000"
|
||||
if self.token:
|
||||
url += "&from="+self.token
|
||||
req = grequests.get(url)
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "incoming from matrix",obj
|
||||
if 'end' not in obj:
|
||||
continue
|
||||
self.token = obj['end']
|
||||
if len(obj['chunk']):
|
||||
return obj['chunk'][0]
|
||||
|
||||
def joinRoom(self, roomId):
|
||||
url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token
|
||||
print url
|
||||
headers={ 'Content-Type': 'application/json' }
|
||||
req = grequests.post(url, headers=headers, data='{}')
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "response: ",obj
|
||||
def joinRoom(self, roomId):
|
||||
url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token
|
||||
print url
|
||||
headers={ 'Content-Type': 'application/json' }
|
||||
req = grequests.post(url, headers=headers, data='{}')
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "response: ",obj
|
||||
|
||||
def sendEvent(self, roomId, evType, event):
|
||||
url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token
|
||||
print url
|
||||
print json.dumps(event)
|
||||
headers={ 'Content-Type': 'application/json' }
|
||||
req = grequests.post(url, headers=headers, data=json.dumps(event))
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "response: ",obj
|
||||
def sendEvent(self, roomId, evType, event):
|
||||
url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token
|
||||
print url
|
||||
print json.dumps(event)
|
||||
headers={ 'Content-Type': 'application/json' }
|
||||
req = grequests.post(url, headers=headers, data=json.dumps(event))
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "response: ",obj
|
||||
|
||||
|
||||
|
||||
@@ -83,178 +83,178 @@ xmppClients = {}
|
||||
|
||||
|
||||
def matrixLoop():
|
||||
while True:
|
||||
ev = matrixCli.getEvent()
|
||||
print ev
|
||||
if ev['type'] == 'm.room.member':
|
||||
print 'membership event'
|
||||
if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME:
|
||||
roomId = ev['room_id']
|
||||
print "joining room %s" % (roomId)
|
||||
matrixCli.joinRoom(roomId)
|
||||
elif ev['type'] == 'm.room.message':
|
||||
if ev['room_id'] in xmppClients:
|
||||
print "already have a bridge for that user, ignoring"
|
||||
continue
|
||||
print "got message, connecting"
|
||||
xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
|
||||
gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
|
||||
elif ev['type'] == 'm.call.invite':
|
||||
print "Incoming call"
|
||||
#sdp = ev['content']['offer']['sdp']
|
||||
#print "sdp: %s" % (sdp)
|
||||
#xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
|
||||
#gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
|
||||
elif ev['type'] == 'm.call.answer':
|
||||
print "Call answered"
|
||||
sdp = ev['content']['answer']['sdp']
|
||||
if ev['room_id'] not in xmppClients:
|
||||
print "We didn't have a call for that room"
|
||||
continue
|
||||
# should probably check call ID too
|
||||
xmppCli = xmppClients[ev['room_id']]
|
||||
xmppCli.sendAnswer(sdp)
|
||||
elif ev['type'] == 'm.call.hangup':
|
||||
if ev['room_id'] in xmppClients:
|
||||
xmppClients[ev['room_id']].stop()
|
||||
del xmppClients[ev['room_id']]
|
||||
|
||||
while True:
|
||||
ev = matrixCli.getEvent()
|
||||
print ev
|
||||
if ev['type'] == 'm.room.member':
|
||||
print 'membership event'
|
||||
if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME:
|
||||
roomId = ev['room_id']
|
||||
print "joining room %s" % (roomId)
|
||||
matrixCli.joinRoom(roomId)
|
||||
elif ev['type'] == 'm.room.message':
|
||||
if ev['room_id'] in xmppClients:
|
||||
print "already have a bridge for that user, ignoring"
|
||||
continue
|
||||
print "got message, connecting"
|
||||
xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
|
||||
gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
|
||||
elif ev['type'] == 'm.call.invite':
|
||||
print "Incoming call"
|
||||
#sdp = ev['content']['offer']['sdp']
|
||||
#print "sdp: %s" % (sdp)
|
||||
#xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
|
||||
#gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
|
||||
elif ev['type'] == 'm.call.answer':
|
||||
print "Call answered"
|
||||
sdp = ev['content']['answer']['sdp']
|
||||
if ev['room_id'] not in xmppClients:
|
||||
print "We didn't have a call for that room"
|
||||
continue
|
||||
# should probably check call ID too
|
||||
xmppCli = xmppClients[ev['room_id']]
|
||||
xmppCli.sendAnswer(sdp)
|
||||
elif ev['type'] == 'm.call.hangup':
|
||||
if ev['room_id'] in xmppClients:
|
||||
xmppClients[ev['room_id']].stop()
|
||||
del xmppClients[ev['room_id']]
|
||||
|
||||
class TrivialXmppClient:
|
||||
def __init__(self, matrixRoom, userId):
|
||||
self.rid = 0
|
||||
self.matrixRoom = matrixRoom
|
||||
self.userId = userId
|
||||
self.running = True
|
||||
def __init__(self, matrixRoom, userId):
|
||||
self.rid = 0
|
||||
self.matrixRoom = matrixRoom
|
||||
self.userId = userId
|
||||
self.running = True
|
||||
|
||||
def stop(self):
|
||||
self.running = False
|
||||
def stop(self):
|
||||
self.running = False
|
||||
|
||||
def nextRid(self):
|
||||
self.rid += 1
|
||||
return '%d' % (self.rid)
|
||||
def nextRid(self):
|
||||
self.rid += 1
|
||||
return '%d' % (self.rid)
|
||||
|
||||
def sendIq(self, xml):
|
||||
fullXml = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>" % (self.nextRid(), self.sid, xml)
|
||||
#print "\t>>>%s" % (fullXml)
|
||||
return self.xmppPoke(fullXml)
|
||||
|
||||
def xmppPoke(self, xml):
|
||||
headers = {'Content-Type': 'application/xml'}
|
||||
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
|
||||
resps = grequests.map([req])
|
||||
obj = BeautifulSoup(resps[0].content)
|
||||
return obj
|
||||
def sendIq(self, xml):
|
||||
fullXml = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>" % (self.nextRid(), self.sid, xml)
|
||||
#print "\t>>>%s" % (fullXml)
|
||||
return self.xmppPoke(fullXml)
|
||||
|
||||
def sendAnswer(self, answer):
|
||||
print "sdp from matrix client",answer
|
||||
p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
jingle, out_err = p.communicate(answer)
|
||||
jingle = jingle % {
|
||||
'tojid': self.callfrom,
|
||||
'action': 'session-accept',
|
||||
'initiator': self.callfrom,
|
||||
'responder': self.jid,
|
||||
'sid': self.callsid
|
||||
}
|
||||
print "answer jingle from sdp",jingle
|
||||
res = self.sendIq(jingle)
|
||||
print "reply from answer: ",res
|
||||
|
||||
self.ssrcs = {}
|
||||
jingleSoup = BeautifulSoup(jingle)
|
||||
for cont in jingleSoup.iq.jingle.findAll('content'):
|
||||
if cont.description:
|
||||
self.ssrcs[cont['name']] = cont.description['ssrc']
|
||||
print "my ssrcs:",self.ssrcs
|
||||
def xmppPoke(self, xml):
|
||||
headers = {'Content-Type': 'application/xml'}
|
||||
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
|
||||
resps = grequests.map([req])
|
||||
obj = BeautifulSoup(resps[0].content)
|
||||
return obj
|
||||
|
||||
gevent.joinall([
|
||||
gevent.spawn(self.advertiseSsrcs)
|
||||
])
|
||||
|
||||
def advertiseSsrcs(self):
|
||||
def sendAnswer(self, answer):
|
||||
print "sdp from matrix client",answer
|
||||
p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
jingle, out_err = p.communicate(answer)
|
||||
jingle = jingle % {
|
||||
'tojid': self.callfrom,
|
||||
'action': 'session-accept',
|
||||
'initiator': self.callfrom,
|
||||
'responder': self.jid,
|
||||
'sid': self.callsid
|
||||
}
|
||||
print "answer jingle from sdp",jingle
|
||||
res = self.sendIq(jingle)
|
||||
print "reply from answer: ",res
|
||||
|
||||
self.ssrcs = {}
|
||||
jingleSoup = BeautifulSoup(jingle)
|
||||
for cont in jingleSoup.iq.jingle.findAll('content'):
|
||||
if cont.description:
|
||||
self.ssrcs[cont['name']] = cont.description['ssrc']
|
||||
print "my ssrcs:",self.ssrcs
|
||||
|
||||
gevent.joinall([
|
||||
gevent.spawn(self.advertiseSsrcs)
|
||||
])
|
||||
|
||||
def advertiseSsrcs(self):
|
||||
time.sleep(7)
|
||||
print "SSRC spammer started"
|
||||
while self.running:
|
||||
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] }
|
||||
res = self.sendIq(ssrcMsg)
|
||||
print "reply from ssrc announce: ",res
|
||||
time.sleep(10)
|
||||
|
||||
|
||||
print "SSRC spammer started"
|
||||
while self.running:
|
||||
        ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] }
        res = self.sendIq(ssrcMsg)
        print "reply from ssrc announce: ",res
        time.sleep(10)

    def xmppLoop(self):
        self.matrixCallId = time.time()
        res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), HOST))

        print res
        self.sid = res.body['sid']
        print "sid %s" % (self.sid)

        res = self.sendIq("<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>")

        res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), self.sid, HOST))

        res = self.sendIq("<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>")
        print res

        self.jid = res.body.iq.bind.jid.string
        print "jid: %s" % (self.jid)
        self.shortJid = self.jid.split('-')[0]

        res = self.sendIq("<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>")

        #randomthing = res.body.iq['to']
        #whatsitpart = randomthing.split('-')[0]

        #print "other random bind thing: %s" % (randomthing)

        # advertise presence to the jitsi room, with our nick
        res = self.sendIq("<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>" % (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId))
        self.muc = {'users': []}
        for p in res.body.findAll('presence'):
            u = {}
            u['shortJid'] = p['from'].split('/')[1]
            if p.c and p.c.nick:
                u['nick'] = p.c.nick.string
            self.muc['users'].append(u)
        print "muc: ",self.muc

        # wait for stuff
        while True:
            print "waiting..."
            res = self.sendIq("")
            print "got from stream: ",res
            if res.body.iq:
                jingles = res.body.iq.findAll('jingle')
                if len(jingles):
                    self.callfrom = res.body.iq['from']
                    self.handleInvite(jingles[0])
            elif 'type' in res.body and res.body['type'] == 'terminate':
                self.running = False
                del xmppClients[self.matrixRoom]
                return

    def handleInvite(self, jingle):
        self.initiator = jingle['initiator']
        self.callsid = jingle['sid']
        p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--jingle'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        print "raw jingle invite",str(jingle)
        sdp, out_err = p.communicate(str(jingle))
        print "transformed remote offer sdp",sdp
        inviteEvent = {
            'offer': {
                'type': 'offer',
                'sdp': sdp
            },
            'call_id': self.matrixCallId,
            'version': 0,
            'lifetime': 30000
        }
        matrixCli.sendEvent(self.matrixRoom, 'm.call.invite', inviteEvent)


matrixCli = TrivialMatrixClient(ACCESS_TOKEN)

gevent.joinall([
    gevent.spawn(matrixLoop)
])
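The inviteEvent above carries the offer half of the Matrix VoIP v0 handshake. For reference, a hedged sketch of the matching m.call.answer payload the Matrix side sends back, which the bridge then relays to XMPP (this snippet is not part of the original script; answer_sdp and matrix_call_id are hypothetical placeholders):

# Sketch of the m.call.answer event content a Matrix client produces in
# reply to the invite; call_id must echo the invite's call_id so the two
# halves of the call can be matched up.
answerEvent = {
    'answer': {
        'type': 'answer',
        'sdp': answer_sdp,          # hypothetical: SDP produced by the callee
    },
    'call_id': matrix_call_id,      # hypothetical: same value as the invite
    'version': 0,
}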
@@ -175,13 +175,12 @@ sub on_room_message

     my $verto_connecting = $loop->new_future;
     $bot_verto->connect(
         %{ $CONFIG{"verto-bot"} },
-        on_connected => sub {
-            warn("[Verto] connected to websocket");
-            $verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
-        },
         on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
         on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
-    );
+    )->then( sub {
+        warn("[Verto] connected to websocket");
+        $verto_connecting->done($bot_verto) if not $verto_connecting->is_done;
+    });

     Future->needs_all(
         $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
493 contrib/vertobot/bridge.pl Executable file
@@ -0,0 +1,493 @@
#!/usr/bin/env perl

use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::HTTP;
use Net::Async::HTTP::Server;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;
use URI::Encode qw(uri_encode uri_decode);

binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";

my $msisdn_to_matrix = {
    '447417892400' => '@matthew:matrix.org',
};

my $matrix_to_msisdn = {};
foreach (keys %$msisdn_to_matrix) {
    $matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
}


my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
# https://rt.cpan.org/Ticket/Display.html?id=93107
# ref $loop eq "IO::Async::Loop::Poll" and
#     warn "Using SSL with IO::Poll causes known memory-leaks!!\n";

GetOptions(
    'C|config=s' => \my $CONFIG,
    'eval-from=s' => \my $EVAL_FROM,
) or exit 1;

if( defined $EVAL_FROM ) {
    # An emergency 'eval() this file' hack
    $SIG{HUP} = sub {
        my $code = do {
            open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
            local $/; <$fh>
        };

        eval $code or warn "Cannot eval() - $@";
    };
}

defined $CONFIG or die "Must supply --config\n";

my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };

my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;

my $bridgestate = {};
my $roomid_by_callid = {};

my $sessid = lc new Data::UUID->create_str();
my $as_token = $CONFIG{"matrix-bot"}->{as_token};
my $hs_domain = $CONFIG{"matrix-bot"}->{domain};

my $http = Net::Async::HTTP->new();
$loop->add( $http );

sub create_virtual_user
{
    my ($localpart) = @_;
    my ( $response ) = $http->do_request(
        method => "POST",
        uri => URI->new(
            $CONFIG{"matrix"}->{server}.
                "/_matrix/client/api/v1/register?".
                "access_token=$as_token&user_id=$localpart"
        ),
        content_type => "application/json",
        content => <<EOT
{
    "type": "m.login.application_service",
    "user": "$localpart"
}
EOT
    )->get;
    warn $response->as_string if ($response->code != 200);
}

my $http_server = Net::Async::HTTP::Server->new(
    on_request => sub {
        my $self = shift;
        my ( $req ) = @_;

        my $response;
        my $path = uri_decode($req->path);
        warn("request: $path");
        if ($path =~ m#/users/\@(\+.*)#) {
            # when queried about virtual users, auto-create them in the HS
            my $localpart = $1;
            create_virtual_user($localpart);
            $response = HTTP::Response->new( 200 );
            $response->add_content('{}');
            $response->content_type( "application/json" );
        }
        elsif ($path =~ m#/transactions/(.*)#) {
            my $event = JSON->new->decode($req->body);
            print Dumper($event);

            my $room_id = $event->{room_id};
            my %dp = %{$CONFIG{'verto-dialog-params'}};
            $dp{callID} = $bridgestate->{$room_id}->{callid};

            if ($event->{type} eq 'm.room.membership') {
                my $membership = $event->{content}->{membership};
                my $state_key = $event->{state_key};
                my $room_id = $event->{state_id};

                if ($membership eq 'invite') {
                    # autojoin invites
                    my ( $response ) = $http->do_request(
                        method => "POST",
                        uri => URI->new(
                            $CONFIG{"matrix"}->{server}.
                                "/_matrix/client/api/v1/rooms/$room_id/join?".
                                "access_token=$as_token&user_id=$state_key"
                        ),
                        content_type => "application/json",
                        content => "{}",
                    )->get;
                    warn $response->as_string if ($response->code != 200);
                }
            }
            elsif ($event->{type} eq 'm.call.invite') {
                my $room_id = $event->{room_id};
                $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
                $bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
                $bridgestate->{$room_id}->{sessid} = $sessid;
                # $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
                my $offer = $event->{content}->{offer}->{sdp};
                # $bridgestate->{$room_id}->{gathered_candidates} = 0;
                $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
                # no trickle ICE in verto apparently

                my $f = send_verto_json_request("verto.invite", {
                    "sdp" => $offer,
                    "dialogParams" => \%dp,
                    "sessid" => $bridgestate->{$room_id}->{sessid},
                });
                $self->adopt_future($f);
            }
            # elsif ($event->{type} eq 'm.call.candidates') {
            #     # XXX: this could fire for both matrix->verto and verto->matrix calls
            #     # and races as it collects candidates. much better to just turn off
            #     # candidate gathering in the webclient entirely for now
            #
            #     my $room_id = $event->{room_id};
            #     # XXX: compare call IDs
            #     if (!$bridgestate->{$room_id}->{gathered_candidates}) {
            #         $bridgestate->{$room_id}->{gathered_candidates} = 1;
            #         my $offer = $bridgestate->{$room_id}->{offer};
            #         my $candidate_block = "";
            #         foreach (@{$event->{content}->{candidates}}) {
            #             $candidate_block .= "a=" . $_->{candidate} . "\r\n";
            #         }
            #         # XXX: collate using the right m= line - for now assume audio call
            #         $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
            #
            #         my $f = send_verto_json_request("verto.invite", {
            #             "sdp" => $offer,
            #             "dialogParams" => \%dp,
            #             "sessid" => $bridgestate->{$room_id}->{sessid},
            #         });
            #         $self->adopt_future($f);
            #     }
            #     else {
            #         # ignore them, as no trickle ICE, although we might as well
            #         # batch them up
            #         # foreach (@{$event->{content}->{candidates}}) {
            #         #     push @{$bridgestate->{$room_id}->{candidates}}, $_;
            #         # }
            #     }
            # }
            elsif ($event->{type} eq 'm.call.answer') {
                # grab the answer and relay it to verto as a verto.answer
                my $room_id = $event->{room_id};

                my $answer = $event->{content}->{answer}->{sdp};
                my $f = send_verto_json_request("verto.answer", {
                    "sdp" => $answer,
                    "dialogParams" => \%dp,
                    "sessid" => $bridgestate->{$room_id}->{sessid},
                });
                $self->adopt_future($f);
            }
            elsif ($event->{type} eq 'm.call.hangup') {
                my $room_id = $event->{room_id};
                if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
                    my $f = send_verto_json_request("verto.bye", {
                        "dialogParams" => \%dp,
                        "sessid" => $bridgestate->{$room_id}->{sessid},
                    });
                    $self->adopt_future($f);
                }
                else {
                    warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
                }
            }
            else {
                warn "Unhandled event: $event->{type}";
            }

            $response = HTTP::Response->new( 200 );
            $response->add_content('{}');
            $response->content_type( "application/json" );
        }
        else {
            warn "Unhandled path: $path";
            $response = HTTP::Response->new( 404 );
        }

        $req->respond( $response );
    },
);
$loop->add( $http_server );

$http_server->listen(
    addr => { family => "inet", socktype => "stream", port => 8009 },
    on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
);

my $bot_verto = Net::Async::WebSocket::Client->new(
    on_frame => sub {
        my ( $self, $frame ) = @_;
        warn "[Verto] receiving $frame";
        on_verto_json($frame);
    },
);
$loop->add( $bot_verto );

my $verto_connecting = $loop->new_future;
$bot_verto->connect(
    %{ $CONFIG{"verto-bot"} },
    on_connected => sub {
        warn("[Verto] connected to websocket");
        if (not $verto_connecting->is_done) {
            $verto_connecting->done($bot_verto);

            send_verto_json_request("login", {
                'login' => $CONFIG{'verto-dialog-params'}{'login'},
                'passwd' => $CONFIG{'verto-config'}{'passwd'},
                'sessid' => $sessid,
            });
        }
    },
    on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
    on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);

# die Dumper($verto_connecting);

my $as_url = $CONFIG{"matrix-bot"}->{as_url};

Future->needs_all(
    $http->do_request(
        method => "POST",
        uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
        content_type => "application/json",
        content => <<EOT
{
    "as_token": "$as_token",
    "url": "$as_url",
    "namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
}
EOT
    )->then( sub{
        my ($response) = (@_);
        warn $response->as_string if ($response->code != 200);
        return Future->done;
    }),
    $verto_connecting,
)->get;

$loop->attach_signal(
    PIPE => sub { warn "pipe\n" }
);
$loop->attach_signal(
    INT => sub { $loop->stop },
);
$loop->attach_signal(
    TERM => sub { $loop->stop },
);

eval {
    $loop->run;
} or my $e = $@;

die $e if $e;

exit 0;

{
    my $json_id;
    my $requests;

    sub send_verto_json_request
    {
        $json_id ||= 1;

        my ($method, $params) = @_;
        my $json = {
            jsonrpc => "2.0",
            method => $method,
            params => $params,
            id => $json_id,
        };
        my $text = JSON->new->encode( $json );
        warn "[Verto] sending $text";
        $bot_verto->send_frame ( $text );
        my $request = $loop->new_future;
        $requests->{$json_id} = $request;
        $json_id++;
        return $request;
    }

    sub send_verto_json_response
    {
        my ($result, $id) = @_;
        my $json = {
            jsonrpc => "2.0",
            result => $result,
            id => $id,
        };
        my $text = JSON->new->encode( $json );
        warn "[Verto] sending $text";
        $bot_verto->send_frame ( $text );
    }

    sub on_verto_json
    {
        my $json = JSON->new->decode( $_[0] );
        if ($json->{method}) {
            if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
                    $json->{method} eq 'verto.media') {

                my $caller = $json->{dialogParams}->{caller_id_number};
                my $callee = $json->{dialogParams}->{destination_number};
                my $caller_user = '@+' . $caller . ':' . $hs_domain;
                my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
                my $room_id = $roomid_by_callid->{$json->{params}->{callID}};

                if ($json->{params}->{sdp}) {
                    $http->do_request(
                        method => "POST",
                        uri => URI->new(
                            $CONFIG{"matrix"}->{server}.
                                "/_matrix/client/api/v1/send/m.call.answer?".
                                "access_token=$as_token&user_id=$caller_user"
                        ),
                        content_type => "application/json",
                        content => JSON->new->encode({
                            call_id => $bridgestate->{$room_id}->{matrix_callid},
                            version => 0,
                            answer => {
                                sdp => $json->{params}->{sdp},
                                type => "answer",
                            },
                        }),
                    )->then( sub {
                        send_verto_json_response( {
                            method => $json->{method},
                        }, $json->{id});
                    })->get;
                }
            }
            elsif ($json->{method} eq 'verto.invite') {
                my $caller = $json->{dialogParams}->{caller_id_number};
                my $callee = $json->{dialogParams}->{destination_number};
                my $caller_user = '@+' . $caller . ':' . $hs_domain;
                my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";

                my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
                my $room_id;

                # create a virtual user for the caller if needed.
                create_virtual_user($caller);

                # create a room of form #peer-peer and invite the callee
                $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/createRoom?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => JSON->new->encode({
                        room_alias_name => $alias,
                        invite => [ $callee_user ],
                    }),
                )->then( sub {
                    my ( $response ) = @_;
                    my $resp = JSON->new->decode($response->content);
                    $room_id = $resp->{room_id};
                    $roomid_by_callid->{$json->{params}->{callID}} = $room_id;
                })->get;

                # join it
                my ($response) = $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/join/$room_id?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => '{}',
                )->get;

                $bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str();
                $bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
                $bridgestate->{$room_id}->{sessid} = $sessid;

                # put the m.call.invite in there
                $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/send/m.call.invite?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => JSON->new->encode({
                        call_id => $bridgestate->{$room_id}->{matrix_callid},
                        version => 0,
                        answer => {
                            sdp => $json->{params}->{sdp},
                            type => "offer",
                        },
                    }),
                )->then( sub {
                    # acknowledge the verto
                    send_verto_json_response( {
                        method => $json->{method},
                    }, $json->{id});
                })->get;
            }
            elsif ($json->{method} eq 'verto.bye') {
                my $caller = $json->{dialogParams}->{caller_id_number};
                my $callee = $json->{dialogParams}->{destination_number};
                my $caller_user = '@+' . $caller . ':' . $hs_domain;
                my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
                my $room_id = $roomid_by_callid->{$json->{params}->{callID}};

                # put the m.call.hangup into the room
                $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/send/m.call.hangup?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => JSON->new->encode({
                        call_id => $bridgestate->{$room_id}->{matrix_callid},
                        version => 0,
                    }),
                )->then( sub {
                    # acknowledge the verto
                    send_verto_json_response( {
                        method => $json->{method},
                    }, $json->{id});
                })->get;
            }
            else {
                warn ("[Verto] unhandled method: " . $json->{method});
                send_verto_json_response( {
                    method => $json->{method},
                }, $json->{id});
            }
        }
        elsif ($json->{result}) {
            $requests->{$json->{id}}->done($json->{result});
        }
        elsif ($json->{error}) {
            $requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
        }
    }
}
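The closure above pairs each outgoing JSON-RPC request with a future keyed by its id, which on_verto_json later resolves from a reply's result or fails from its error. A minimal Python sketch of the same correlation pattern (an illustration, not part of the bridge; send_frame stands in for the websocket transport):

import asyncio
import itertools
import json

class JsonRpcPeer:
    def __init__(self, send_frame):
        self._send_frame = send_frame   # placeholder for the websocket send
        self._ids = itertools.count(1)
        self._pending = {}              # request id -> asyncio.Future

    def request(self, method, params):
        # number the request and remember a future under that id
        json_id = next(self._ids)
        self._send_frame(json.dumps({
            "jsonrpc": "2.0", "method": method,
            "params": params, "id": json_id,
        }))
        future = asyncio.get_event_loop().create_future()
        self._pending[json_id] = future
        return future

    def on_frame(self, frame):
        # resolve or fail the future that matches the reply's id
        msg = json.loads(frame)
        if "result" in msg:
            self._pending.pop(msg["id"]).set_result(msg["result"])
        elif "error" in msg:
            self._pending.pop(msg["id"]).set_exception(
                RuntimeError(msg["error"]["message"]))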
@@ -32,7 +32,8 @@ for port in 8080 8081 8082; do

     python -m synapse.app.homeserver \
         --config-path "demo/etc/$port.config" \
         -D --pid-file "$DIR/$port.pid" \
         --manhole $((port + 1000)) \
         --tls-dh-params-path "demo/demo.tls.dh" \
-        $PARAMS $SYNAPSE_PARAMS
+        --media-store-path "demo/media_store.$port" \
+        $PARAMS $SYNAPSE_PARAMS
@@ -81,7 +81,7 @@ Your home server configuration file needs the following extra keys:

 As an example, here is the relevant section of the config file for
 matrix.org::

-    turn_uris: turn:turn.matrix.org:3478?transport=udp,turn:turn.matrix.org:3478?transport=tcp
+    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
     turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
     turn_user_lifetime: 86400000
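For context on how these keys are used: turn_shared_secret lets the home server mint short-lived TURN credentials without the TURN server keeping a user database, following the TURN REST API convention that coturn's use-auth-secret mode implements. A hedged sketch of that derivation (not Synapse's actual code; user_id is a placeholder):

import base64
import hashlib
import hmac
import time

def turn_credentials(shared_secret, user_id, lifetime_ms=86400000):
    # the username encodes an expiry timestamp so the TURN server can
    # reject stale credentials without any extra state
    expiry = int(time.time()) + lifetime_ms // 1000
    username = "%d:%s" % (expiry, user_id)
    # the password is the base64 HMAC-SHA1 of that username under the
    # shared secret, which the TURN server recomputes to check it
    password = base64.b64encode(
        hmac.new(shared_secret.encode(), username.encode(), hashlib.sha1).digest()
    ).decode()
    return username, password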
@@ -1,17 +0,0 @@

.loggedin {
    visibility: hidden;
}

p {
    font-family: monospace;
}

table
{
    border-spacing:5px;
}

th,td
{
    padding:5px;
}
@@ -1,30 +0,0 @@

<div>
    <p>This room creation / message sending demo requires a home server to be running on http://localhost:8008</p>
</div>
<form class="loginForm">
    <input type="text" id="userLogin" placeholder="Username"></input>
    <input type="password" id="passwordLogin" placeholder="Password"></input>
    <input type="button" class="login" value="Login"></input>
</form>
<div class="loggedin">
    <form class="createRoomForm">
        <input type="text" id="roomAlias" placeholder="Room alias (optional)"></input>
        <input type="button" class="createRoom" value="Create Room"></input>
    </form>
    <form class="sendMessageForm">
        <input type="text" id="roomId" placeholder="Room ID"></input>
        <input type="text" id="messageBody" placeholder="Message body"></input>
        <input type="button" class="sendMessage" value="Send Message"></input>
    </form>
    <table id="rooms">
        <tbody>
            <tr>
                <th>Room ID</th>
                <th>My state</th>
                <th>Room Alias</th>
                <th>Latest message</th>
            </tr>
        </tbody>
    </table>
</div>
@@ -1,113 +0,0 @@

var accountInfo = {};

var showLoggedIn = function(data) {
    accountInfo = data;
    getCurrentRoomList();
    $(".loggedin").css({visibility: "visible"});
};

$('.login').live('click', function() {
    var user = $("#userLogin").val();
    var password = $("#passwordLogin").val();
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/login",
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({ user: user, password: password, type: "m.login.password" }),
        dataType: "json",
        success: function(data) {
            showLoggedIn(data);
        },
        error: function(err) {
            var errMsg = "To try this, you need a home server running!";
            var errJson = $.parseJSON(err.responseText);
            if (errJson) {
                errMsg = JSON.stringify(errJson);
            }
            alert(errMsg);
        }
    });
});

var getCurrentRoomList = function() {
    var url = "http://localhost:8008/_matrix/client/api/v1/initialSync?access_token=" + accountInfo.access_token + "&limit=1";
    $.getJSON(url, function(data) {
        var rooms = data.rooms;
        for (var i=0; i<rooms.length; ++i) {
            rooms[i].latest_message = rooms[i].messages.chunk[0].content.body;
            addRoom(rooms[i]);
        }
    }).fail(function(err) {
        alert(JSON.stringify($.parseJSON(err.responseText)));
    });
};

$('.createRoom').live('click', function() {
    var roomAlias = $("#roomAlias").val();
    var data = {};
    if (roomAlias.length > 0) {
        data.room_alias_name = roomAlias;
    }
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/createRoom?access_token="+accountInfo.access_token,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify(data),
        dataType: "json",
        success: function(data) {
            data.membership = "join"; // you are automatically joined into every room you make.
            data.latest_message = "";
            addRoom(data);
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
});

var addRoom = function(data) {
    row = "<tr>" +
        "<td>"+data.room_id+"</td>" +
        "<td>"+data.membership+"</td>" +
        "<td>"+data.room_alias+"</td>" +
        "<td>"+data.latest_message+"</td>" +
        "</tr>";
    $("#rooms").append(row);
};

$('.sendMessage').live('click', function() {
    var roomId = $("#roomId").val();
    var body = $("#messageBody").val();
    var msgId = $.now();

    if (roomId.length === 0 || body.length === 0) {
        return;
    }

    var url = "http://localhost:8008/_matrix/client/api/v1/rooms/$roomid/send/m.room.message?access_token=$token";
    url = url.replace("$token", accountInfo.access_token);
    url = url.replace("$roomid", encodeURIComponent(roomId));

    var data = {
        msgtype: "m.text",
        body: body
    };

    $.ajax({
        url: url,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify(data),
        dataType: "json",
        success: function(data) {
            $("#messageBody").val("");
            // wipe the table and reload it. Using the event stream would be the best
            // solution but that is out of scope of this fiddle.
            $("#rooms").find("tr:gt(0)").remove();
            getCurrentRoomList();
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
});
@@ -1,17 +0,0 @@

.loggedin {
    visibility: hidden;
}

p {
    font-family: monospace;
}

table
{
    border-spacing:5px;
}

th,td
{
    padding:5px;
}
@@ -1,23 +0,0 @@

<div>
    <p>This event stream demo requires a home server to be running on http://localhost:8008</p>
</div>
<form class="loginForm">
    <input type="text" id="userLogin" placeholder="Username"></input>
    <input type="password" id="passwordLogin" placeholder="Password"></input>
    <input type="button" class="login" value="Login"></input>
</form>
<div class="loggedin">
    <form class="sendMessageForm">
        <input type="button" class="sendMessage" value="Send random message"></input>
    </form>
    <p id="streamErrorText"></p>
    <table id="rooms">
        <tbody>
            <tr>
                <th>Room ID</th>
                <th>Latest message</th>
            </tr>
        </tbody>
    </table>
</div>
@@ -1,145 +0,0 @@

var accountInfo = {};

var eventStreamInfo = {
    from: "END"
};

var roomInfo = [];

var longpollEventStream = function() {
    var url = "http://localhost:8008/_matrix/client/api/v1/events?access_token=$token&from=$from";
    url = url.replace("$token", accountInfo.access_token);
    url = url.replace("$from", eventStreamInfo.from);

    $.getJSON(url, function(data) {
        eventStreamInfo.from = data.end;

        var hasNewLatestMessage = false;
        for (var i=0; i<data.chunk.length; ++i) {
            if (data.chunk[i].type === "m.room.message") {
                for (var j=0; j<roomInfo.length; ++j) {
                    if (roomInfo[j].room_id === data.chunk[i].room_id) {
                        roomInfo[j].latest_message = data.chunk[i].content.body;
                        hasNewLatestMessage = true;
                    }
                }
            }
        }

        if (hasNewLatestMessage) {
            setRooms(roomInfo);
        }
        $("#streamErrorText").text("");
        longpollEventStream();
    }).fail(function(err) {
        $("#streamErrorText").text("Event stream error: "+JSON.stringify($.parseJSON(err.responseText)));
        setTimeout(longpollEventStream, 5000);
    });
};

var showLoggedIn = function(data) {
    accountInfo = data;
    longpollEventStream();
    getCurrentRoomList();
    $(".loggedin").css({visibility: "visible"});
};

$('.login').live('click', function() {
    var user = $("#userLogin").val();
    var password = $("#passwordLogin").val();
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/login",
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({ user: user, password: password, type: "m.login.password" }),
        dataType: "json",
        success: function(data) {
            $("#rooms").find("tr:gt(0)").remove();
            showLoggedIn(data);
        },
        error: function(err) {
            var errMsg = "To try this, you need a home server running!";
            var errJson = $.parseJSON(err.responseText);
            if (errJson) {
                errMsg = JSON.stringify(errJson);
            }
            alert(errMsg);
        }
    });
});

var getCurrentRoomList = function() {
    $("#roomId").val("");
    var url = "http://localhost:8008/_matrix/client/api/v1/initialSync?access_token=" + accountInfo.access_token + "&limit=1";
    $.getJSON(url, function(data) {
        var rooms = data.rooms;
        for (var i=0; i<rooms.length; ++i) {
            if ("messages" in rooms[i]) {
                rooms[i].latest_message = rooms[i].messages.chunk[0].content.body;
            }
        }
        roomInfo = rooms;
        setRooms(roomInfo);
    }).fail(function(err) {
        alert(JSON.stringify($.parseJSON(err.responseText)));
    });
};

$('.sendMessage').live('click', function() {
    if (roomInfo.length === 0) {
        alert("There is no room to send a message to!");
        return;
    }

    var index = Math.floor(Math.random() * roomInfo.length);

    sendMessage(roomInfo[index].room_id);
});

var sendMessage = function(roomId) {
    var body = "jsfiddle message @" + $.now();

    if (roomId.length === 0) {
        return;
    }

    var url = "http://localhost:8008/_matrix/client/api/v1/rooms/$roomid/send/m.room.message?access_token=$token";
    url = url.replace("$token", accountInfo.access_token);
    url = url.replace("$roomid", encodeURIComponent(roomId));

    var data = {
        msgtype: "m.text",
        body: body
    };

    $.ajax({
        url: url,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify(data),
        dataType: "json",
        success: function(data) {
            $("#messageBody").val("");
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
};

var setRooms = function(roomList) {
    // wipe existing entries
    $("#rooms").find("tr:gt(0)").remove();

    var rows = "";
    for (var i=0; i<roomList.length; ++i) {
        row = "<tr>" +
            "<td>"+roomList[i].room_id+"</td>" +
            "<td>"+roomList[i].latest_message+"</td>" +
            "</tr>";
        rows += row;
    }

    $("#rooms").append(rows);
};
@@ -1,43 +0,0 @@

.roomListDashboard, .roomContents, .sendMessageForm {
    visibility: hidden;
}

.roomList {
    background-color: #909090;
}

.messageWrapper {
    background-color: #EEEEEE;
    height: 400px;
    overflow: scroll;
}

.membersWrapper {
    background-color: #EEEEEE;
    height: 200px;
    width: 50%;
    overflow: scroll;
}

.textEntry {
    width: 100%
}

p {
    font-family: monospace;
}

table
{
    border-spacing:5px;
}

th,td
{
    padding:5px;
}

.roomList tr:not(:first-child):hover {
    background-color: orange;
    cursor: pointer;
}
@@ -1,7 +0,0 @@

name: Example Matrix Client
description: Includes login, live event streaming, creating rooms, sending messages and viewing member lists.
authors:
  - matrix.org
resources:
  - http://matrix.org
normalize_css: no
@@ -1,56 +0,0 @@

<div class="signUp">
    <p>Matrix example application: Requires a local home server running at http://localhost:8008</p>
    <form class="registrationForm">
        <p>No account? Register:</p>
        <input type="text" id="userReg" placeholder="Username"></input>
        <input type="password" id="passwordReg" placeholder="Password"></input>
        <input type="button" class="register" value="Register"></input>
    </form>
    <form class="loginForm">
        <p>Got an account? Login:</p>
        <input type="text" id="userLogin" placeholder="Username"></input>
        <input type="password" id="passwordLogin" placeholder="Password"></input>
        <input type="button" class="login" value="Login"></input>
    </form>
</div>

<div class="roomListDashboard">
    <form class="createRoomForm">
        <input type="text" id="roomAlias" placeholder="Room alias"></input>
        <input type="button" class="createRoom" value="Create Room"></input>
    </form>
    <table id="rooms" class="roomList">
        <tbody>
            <tr>
                <th>Room</th>
                <th>My state</th>
                <th>Latest message</th>
            </tr>
        </tbody>
    </table>
</div>

<div class="roomContents">
    <p id="roomName">Select a room</p>
    <div class="messageWrapper">
        <table id="messages">
            <tbody>
            </tbody>
        </table>
    </div>
    <form class="sendMessageForm">
        <input type="text" class="textEntry" id="body" placeholder="Enter text here..." onkeydown="javascript:if (event.keyCode == 13) document.getElementById('sendMsg').focus()"></input>
        <input type="button" class="sendMessage" id="sendMsg" value="Send"></input>
    </form>
</div>

<div>
    <p>Member list:</p>
    <div class="membersWrapper">
        <table id="members">
            <tbody>
            </tbody>
        </table>
    </div>
</div>
@@ -1,327 +0,0 @@

var accountInfo = {};

var eventStreamInfo = {
    from: "END"
};

var roomInfo = [];
var memberInfo = [];
var viewingRoomId;

// ************** Event Streaming **************
var longpollEventStream = function() {
    var url = "http://localhost:8008/_matrix/client/api/v1/events?access_token=$token&from=$from";
    url = url.replace("$token", accountInfo.access_token);
    url = url.replace("$from", eventStreamInfo.from);

    $.getJSON(url, function(data) {
        eventStreamInfo.from = data.end;

        var hasNewLatestMessage = false;
        var updatedMemberList = false;
        var i=0;
        var j=0;
        for (i=0; i<data.chunk.length; ++i) {
            if (data.chunk[i].type === "m.room.message") {
                console.log("Got new message: " + JSON.stringify(data.chunk[i]));
                if (viewingRoomId === data.chunk[i].room_id) {
                    addMessage(data.chunk[i]);
                }

                for (j=0; j<roomInfo.length; ++j) {
                    if (roomInfo[j].room_id === data.chunk[i].room_id) {
                        roomInfo[j].latest_message = data.chunk[i].content.body;
                        hasNewLatestMessage = true;
                    }
                }
            }
            else if (data.chunk[i].type === "m.room.member") {
                if (viewingRoomId === data.chunk[i].room_id) {
                    console.log("Got new member: " + JSON.stringify(data.chunk[i]));
                    addMessage(data.chunk[i]);
                    for (j=0; j<memberInfo.length; ++j) {
                        if (memberInfo[j].state_key === data.chunk[i].state_key) {
                            memberInfo[j] = data.chunk[i];
                            updatedMemberList = true;
                            break;
                        }
                    }
                    if (!updatedMemberList) {
                        memberInfo.push(data.chunk[i]);
                        updatedMemberList = true;
                    }
                }
                if (data.chunk[i].state_key === accountInfo.user_id) {
                    getCurrentRoomList(); // update our join/invite list
                }
            }
            else {
                console.log("Discarding: " + JSON.stringify(data.chunk[i]));
            }
        }

        if (hasNewLatestMessage) {
            setRooms(roomInfo);
        }
        if (updatedMemberList) {
            $("#members").empty();
            for (i=0; i<memberInfo.length; ++i) {
                addMember(memberInfo[i]);
            }
        }
        longpollEventStream();
    }).fail(function(err) {
        setTimeout(longpollEventStream, 5000);
    });
};

// ************** Registration and Login **************
var onLoggedIn = function(data) {
    accountInfo = data;
    longpollEventStream();
    getCurrentRoomList();
    $(".roomListDashboard").css({visibility: "visible"});
    $(".roomContents").css({visibility: "visible"});
    $(".signUp").css({display: "none"});
};

$('.login').live('click', function() {
    var user = $("#userLogin").val();
    var password = $("#passwordLogin").val();
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/login",
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({ user: user, password: password, type: "m.login.password" }),
        dataType: "json",
        success: function(data) {
            onLoggedIn(data);
        },
        error: function(err) {
            alert("Unable to login: is the home server running?");
        }
    });
});

$('.register').live('click', function() {
    var user = $("#userReg").val();
    var password = $("#passwordReg").val();
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/register",
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({ user: user, password: password, type: "m.login.password" }),
        dataType: "json",
        success: function(data) {
            onLoggedIn(data);
        },
        error: function(err) {
            var msg = "Is the home server running?";
            var errJson = $.parseJSON(err.responseText);
            if (errJson !== null) {
                msg = errJson.error;
            }
            alert("Unable to register: "+msg);
        }
    });
});

// ************** Creating a room ******************
$('.createRoom').live('click', function() {
    var roomAlias = $("#roomAlias").val();
    var data = {};
    if (roomAlias.length > 0) {
        data.room_alias_name = roomAlias;
    }
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/createRoom?access_token="+accountInfo.access_token,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify(data),
        dataType: "json",
        success: function(response) {
            $("#roomAlias").val("");
            response.membership = "join"; // you are automatically joined into every room you make.
            response.latest_message = "";

            roomInfo.push(response);
            setRooms(roomInfo);
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
});

// ************** Getting current state **************
var getCurrentRoomList = function() {
    var url = "http://localhost:8008/_matrix/client/api/v1/initialSync?access_token=" + accountInfo.access_token + "&limit=1";
    $.getJSON(url, function(data) {
        var rooms = data.rooms;
        for (var i=0; i<rooms.length; ++i) {
            if ("messages" in rooms[i]) {
                rooms[i].latest_message = rooms[i].messages.chunk[0].content.body;
            }
        }
        roomInfo = rooms;
        setRooms(roomInfo);
    }).fail(function(err) {
        alert(JSON.stringify($.parseJSON(err.responseText)));
    });
};

var loadRoomContent = function(roomId) {
    console.log("loadRoomContent " + roomId);
    viewingRoomId = roomId;
    $("#roomName").text("Room: "+roomId);
    $(".sendMessageForm").css({visibility: "visible"});
    getMessages(roomId);
    getMemberList(roomId);
};

var getMessages = function(roomId) {
    $("#messages").empty();
    var url = "http://localhost:8008/_matrix/client/api/v1/rooms/" +
        encodeURIComponent(roomId) + "/messages?access_token=" + accountInfo.access_token + "&from=END&dir=b&limit=10";
    $.getJSON(url, function(data) {
        for (var i=data.chunk.length-1; i>=0; --i) {
            addMessage(data.chunk[i]);
        }
    });
};

var getMemberList = function(roomId) {
    $("#members").empty();
    memberInfo = [];
    var url = "http://localhost:8008/_matrix/client/api/v1/rooms/" +
        encodeURIComponent(roomId) + "/members?access_token=" + accountInfo.access_token;
    $.getJSON(url, function(data) {
        for (var i=0; i<data.chunk.length; ++i) {
            memberInfo.push(data.chunk[i]);
            addMember(data.chunk[i]);
        }
    });
};

// ************** Sending messages **************
$('.sendMessage').live('click', function() {
    if (viewingRoomId === undefined) {
        alert("There is no room to send a message to!");
        return;
    }
    var body = $("#body").val();
    sendMessage(viewingRoomId, body);
});

var sendMessage = function(roomId, body) {
    var msgId = $.now();

    var url = "http://localhost:8008/_matrix/client/api/v1/rooms/$roomid/send/m.room.message?access_token=$token";
    url = url.replace("$token", accountInfo.access_token);
    url = url.replace("$roomid", encodeURIComponent(roomId));

    var data = {
        msgtype: "m.text",
        body: body
    };

    $.ajax({
        url: url,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify(data),
        dataType: "json",
        success: function(data) {
            $("#body").val("");
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
};

// ************** Navigation and DOM manipulation **************
var setRooms = function(roomList) {
    // wipe existing entries
    $("#rooms").find("tr:gt(0)").remove();

    var rows = "";
    for (var i=0; i<roomList.length; ++i) {
        row = "<tr>" +
            "<td>"+roomList[i].room_id+"</td>" +
            "<td>"+roomList[i].membership+"</td>" +
            "<td>"+roomList[i].latest_message+"</td>" +
            "</tr>";
        rows += row;
    }

    $("#rooms").append(rows);

    $('#rooms').find("tr").click(function(){
        var roomId = $(this).find('td:eq(0)').text();
        var membership = $(this).find('td:eq(1)').text();
        if (membership !== "join") {
            console.log("Joining room " + roomId);
            var url = "http://localhost:8008/_matrix/client/api/v1/rooms/$roomid/join?access_token=$token";
            url = url.replace("$token", accountInfo.access_token);
            url = url.replace("$roomid", encodeURIComponent(roomId));
            $.ajax({
                url: url,
                type: "POST",
                contentType: "application/json; charset=utf-8",
                data: JSON.stringify({membership: "join"}),
                dataType: "json",
                success: function(data) {
                    loadRoomContent(roomId);
                    getCurrentRoomList();
                },
                error: function(err) {
                    alert(JSON.stringify($.parseJSON(err.responseText)));
                }
            });
        }
        else {
            loadRoomContent(roomId);
        }
    });
};

var addMessage = function(data) {

    var msg = data.content.body;
    if (data.type === "m.room.member") {
        if (data.content.membership === undefined) {
            return;
        }
        if (data.content.membership === "invite") {
            msg = "<em>invited " + data.state_key + " to the room</em>";
        }
        else if (data.content.membership === "join") {
            msg = "<em>joined the room</em>";
        }
        else if (data.content.membership === "leave") {
            msg = "<em>left the room</em>";
        }
        else if (data.content.membership === "ban") {
            msg = "<em>was banned from the room</em>";
        }
    }
    if (msg === undefined) {
        return;
    }

    var row = "<tr>" +
        "<td>"+data.user_id+"</td>" +
        "<td>"+msg+"</td>" +
        "</tr>";
    $("#messages").append(row);
};

var addMember = function(data) {
    var row = "<tr>" +
        "<td>"+data.state_key+"</td>" +
        "<td>"+data.content.membership+"</td>" +
        "</tr>";
    $("#members").append(row);
};
@@ -1,7 +0,0 @@

.loggedin {
    visibility: hidden;
}

p {
    font-family: monospace;
}
@@ -1,20 +0,0 @@

<div>
    <p>This registration/login demo requires a home server to be running on http://localhost:8008</p>
</div>
<form class="registrationForm">
    <input type="text" id="user" placeholder="Username"></input>
    <input type="password" id="password" placeholder="Password"></input>
    <input type="button" class="register" value="Register"></input>
</form>
<form class="loginForm">
    <input type="text" id="userLogin" placeholder="Username"></input>
    <input type="password" id="passwordLogin" placeholder="Password"></input>
    <input type="button" class="login" value="Login"></input>
</form>
<div class="loggedin">
    <p id="welcomeText"></p>
    <input type="button" class="testToken" value="Test token"></input>
    <input type="button" class="logout" value="Logout"></input>
    <p id="imSyncText"></p>
</div>
@@ -1,79 +0,0 @@

var accountInfo = {};

var showLoggedIn = function(data) {
    accountInfo = data;
    $(".loggedin").css({visibility: "visible"});
    $("#welcomeText").text("Welcome " + accountInfo.user_id+". Your access token is: " +
        accountInfo.access_token);
};

$('.register').live('click', function() {
    var user = $("#user").val();
    var password = $("#password").val();
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/register",
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({ user: user, password: password, type: "m.login.password" }),
        dataType: "json",
        success: function(data) {
            showLoggedIn(data);
        },
        error: function(err) {
            var errMsg = "To try this, you need a home server running!";
            var errJson = $.parseJSON(err.responseText);
            if (errJson) {
                errMsg = JSON.stringify(errJson);
            }
            alert(errMsg);
        }
    });
});

var login = function(user, password) {
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/login",
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({ user: user, password: password, type: "m.login.password" }),
        dataType: "json",
        success: function(data) {
            showLoggedIn(data);
        },
        error: function(err) {
            var errMsg = "To try this, you need a home server running!";
            var errJson = $.parseJSON(err.responseText);
            if (errJson) {
                errMsg = JSON.stringify(errJson);
            }
            alert(errMsg);
        }
    });
};

$('.login').live('click', function() {
    var user = $("#userLogin").val();
    var password = $("#passwordLogin").val();
    $.getJSON("http://localhost:8008/_matrix/client/api/v1/login", function(data) {
        if (data.flows[0].type !== "m.login.password") {
            alert("I don't know how to login with this type: " + data.type);
            return;
        }
        login(user, password);
    });
});

$('.logout').live('click', function() {
    accountInfo = {};
    $("#imSyncText").text("");
    $(".loggedin").css({visibility: "hidden"});
});

$('.testToken').live('click', function() {
    var url = "http://localhost:8008/_matrix/client/api/v1/initialSync?access_token=" + accountInfo.access_token + "&limit=1";
    $.getJSON(url, function(data) {
        $("#imSyncText").text(JSON.stringify(data, undefined, 2));
    }).fail(function(err) {
        $("#imSyncText").text(JSON.stringify($.parseJSON(err.responseText)));
    });
});
@@ -1,17 +0,0 @@

.loggedin {
    visibility: hidden;
}

p {
    font-family: monospace;
}

table
{
    border-spacing:5px;
}

th,td
{
    padding:5px;
}
@@ -1,37 +0,0 @@

<div>
    <p>This room membership demo requires a home server to be running on http://localhost:8008</p>
</div>
<form class="loginForm">
    <input type="text" id="userLogin" placeholder="Username"></input>
    <input type="password" id="passwordLogin" placeholder="Password"></input>
    <input type="button" class="login" value="Login"></input>
</form>
<div class="loggedin">
    <form class="createRoomForm">
        <input type="button" class="createRoom" value="Create Room"></input>
    </form>
    <form class="changeMembershipForm">
        <input type="text" id="roomId" placeholder="Room ID"></input>
        <input type="text" id="targetUser" placeholder="Target User ID"></input>
        <select id="membership">
            <option value="invite">invite</option>
            <option value="join">join</option>
            <option value="leave">leave</option>
        </select>
        <input type="button" class="changeMembership" value="Change Membership"></input>
    </form>
    <form class="joinAliasForm">
        <input type="text" id="roomAlias" placeholder="Room Alias (#name:domain)"></input>
        <input type="button" class="joinAlias" value="Join via Alias"></input>
    </form>
    <table id="rooms">
        <tbody>
            <tr>
                <th>Room ID</th>
                <th>My state</th>
                <th>Room Alias</th>
            </tr>
        </tbody>
    </table>
</div>
@@ -1,141 +0,0 @@

var accountInfo = {};

var showLoggedIn = function(data) {
    accountInfo = data;
    getCurrentRoomList();
    $(".loggedin").css({visibility: "visible"});
    $("#membership").change(function() {
        if ($("#membership").val() === "invite") {
            $("#targetUser").css({visibility: "visible"});
        }
        else {
            $("#targetUser").css({visibility: "hidden"});
        }
    });
};

$('.login').live('click', function() {
    var user = $("#userLogin").val();
    var password = $("#passwordLogin").val();
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/login",
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({ user: user, password: password, type: "m.login.password" }),
        dataType: "json",
        success: function(data) {
            $("#rooms").find("tr:gt(0)").remove();
            showLoggedIn(data);
        },
        error: function(err) {
            var errMsg = "To try this, you need a home server running!";
            var errJson = $.parseJSON(err.responseText);
            if (errJson) {
                errMsg = JSON.stringify(errJson);
            }
            alert(errMsg);
        }
    });
});

var getCurrentRoomList = function() {
    $("#roomId").val("");
    // wipe the table and reload it. Using the event stream would be the best
    // solution but that is out of scope of this fiddle.
    $("#rooms").find("tr:gt(0)").remove();

    var url = "http://localhost:8008/_matrix/client/api/v1/initialSync?access_token=" + accountInfo.access_token + "&limit=1";
    $.getJSON(url, function(data) {
        var rooms = data.rooms;
        for (var i=0; i<rooms.length; ++i) {
            addRoom(rooms[i]);
        }
    }).fail(function(err) {
        alert(JSON.stringify($.parseJSON(err.responseText)));
    });
};

$('.createRoom').live('click', function() {
    var data = {};
    $.ajax({
        url: "http://localhost:8008/_matrix/client/api/v1/createRoom?access_token="+accountInfo.access_token,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify(data),
        dataType: "json",
        success: function(data) {
            data.membership = "join"; // you are automatically joined into every room you make.
            data.latest_message = "";
            addRoom(data);
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
});

var addRoom = function(data) {
    row = "<tr>" +
        "<td>"+data.room_id+"</td>" +
        "<td>"+data.membership+"</td>" +
        "<td>"+data.room_alias+"</td>" +
        "</tr>";
    $("#rooms").append(row);
};

$('.changeMembership').live('click', function() {
    var roomId = $("#roomId").val();
    var member = $("#targetUser").val();
    var membership = $("#membership").val();

    if (roomId.length === 0) {
        return;
    }

    var url = "http://localhost:8008/_matrix/client/api/v1/rooms/$roomid/$membership?access_token=$token";
    url = url.replace("$token", accountInfo.access_token);
    url = url.replace("$roomid", encodeURIComponent(roomId));
    url = url.replace("$membership", membership);

    var data = {};

    if (membership === "invite") {
        data = {
            user_id: member
        };
    }

    $.ajax({
        url: url,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify(data),
        dataType: "json",
        success: function(data) {
            getCurrentRoomList();
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
});

$('.joinAlias').live('click', function() {
    var roomAlias = $("#roomAlias").val();
    var url = "http://localhost:8008/_matrix/client/api/v1/join/$roomalias?access_token=$token";
    url = url.replace("$token", accountInfo.access_token);
    url = url.replace("$roomalias", encodeURIComponent(roomAlias));
    $.ajax({
        url: url,
        type: "POST",
        contentType: "application/json; charset=utf-8",
        data: JSON.stringify({}),
        dataType: "json",
        success: function(data) {
            getCurrentRoomList();
        },
        error: function(err) {
            alert(JSON.stringify($.parseJSON(err.responseText)));
        }
    });
});
149 register_new_matrix_user Executable file
@@ -0,0 +1,149 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import argparse
import getpass
import hashlib
import hmac
import json
import sys
import urllib2
import yaml


def request_registration(user, password, server_location, shared_secret):
    mac = hmac.new(
        key=shared_secret,
        msg=user,
        digestmod=hashlib.sha1,
    ).hexdigest()

    data = {
        "user": user,
        "password": password,
        "mac": mac,
        "type": "org.matrix.login.shared_secret",
    }

    server_location = server_location.rstrip("/")

    print "Sending registration request..."

    req = urllib2.Request(
        "%s/_matrix/client/api/v1/register" % (server_location,),
        data=json.dumps(data),
        headers={'Content-Type': 'application/json'}
    )
    try:
        f = urllib2.urlopen(req)
        f.read()
        f.close()
        print "Success."
    except urllib2.HTTPError as e:
        print "ERROR! Received %d %s" % (e.code, e.reason,)
        if 400 <= e.code < 500:
            if e.info().type == "application/json":
                resp = json.load(e)
                if "error" in resp:
                    print resp["error"]
        sys.exit(1)


def register_new_user(user, password, server_location, shared_secret):
    if not user:
        try:
            default_user = getpass.getuser()
        except:
            default_user = None

        if default_user:
            user = raw_input("New user localpart [%s]: " % (default_user,))
            if not user:
                user = default_user
        else:
            user = raw_input("New user localpart: ")

    if not user:
        print "Invalid user name"
        sys.exit(1)

    if not password:
        password = getpass.getpass("Password: ")

        if not password:
            print "Password cannot be blank."
            sys.exit(1)

        confirm_password = getpass.getpass("Confirm password: ")

        if password != confirm_password:
            print "Passwords do not match"
            sys.exit(1)

    request_registration(user, password, server_location, shared_secret)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Used to register new users with a given home server when"
                    " registration has been disabled. The home server must be"
                    " configured with the 'registration_shared_secret' option"
                    " set.",
    )
    parser.add_argument(
        "-u", "--user",
        default=None,
        help="Local part of the new user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-p", "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-c", "--config",
        type=argparse.FileType('r'),
        help="Path to server config file. Used to read in shared secret.",
    )

    group.add_argument(
        "-k", "--shared-secret",
        help="Shared secret as defined in server config file.",
    )

    parser.add_argument(
        "server_url",
        default="https://localhost:8448",
        nargs='?',
        help="URL to use to talk to the home server. Defaults to "
             " 'https://localhost:8448'.",
    )

    args = parser.parse_args()

    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        secret = config.get("registration_shared_secret", None)
        if not secret:
            print "No 'registration_shared_secret' defined in config."
|
||||
sys.exit(1)
|
||||
else:
|
||||
secret = args.shared_secret
|
||||
|
||||
register_new_user(args.user, args.password, args.server_url, secret)
|
||||
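The only cryptography in this script is the "mac" field: an HMAC-SHA1 of the new localpart, keyed with registration_shared_secret from the server config. A standalone sketch of that computation (the secret and localpart values are placeholders):

import hashlib
import hmac

shared_secret = "your_registration_shared_secret"  # placeholder
localpart = "alice"                                # placeholder

# The homeserver recomputes exactly this digest and compares it against the
# "mac" field of the org.matrix.login.shared_secret /register request.
mac = hmac.new(
    key=shared_secret,
    msg=localpart,
    digestmod=hashlib.sha1,
).hexdigest()
print mac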
scripts/check_auth.py (Normal file, 65 lines)
@@ -0,0 +1,65 @@
from synapse.events import FrozenEvent
from synapse.api.auth import Auth

from mock import Mock

import argparse
import itertools
import json
import sys


def check_auth(auth, auth_chain, events):
    auth_chain.sort(key=lambda e: e.depth)

    auth_map = {
        e.event_id: e
        for e in auth_chain
    }

    create_events = {}
    for e in auth_chain:
        if e.type == "m.room.create":
            create_events[e.room_id] = e

    for e in itertools.chain(auth_chain, events):
        auth_events_list = [auth_map[i] for i, _ in e.auth_events]

        auth_events = {
            (e.type, e.state_key): e
            for e in auth_events_list
        }

        auth_events[("m.room.create", "")] = create_events[e.room_id]

        try:
            auth.check(e, auth_events=auth_events)
        except Exception as ex:
            print "Failed:", e.event_id, e.type, e.state_key
            print "Auth_events:", auth_events
            print ex
            print json.dumps(e.get_dict(), sort_keys=True, indent=4)
            # raise
        print "Success:", e.event_id, e.type, e.state_key

if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument(
        'json',
        nargs='?',
        type=argparse.FileType('r'),
        default=sys.stdin,
    )

    args = parser.parse_args()

    js = json.load(args.json)


    auth = Auth(Mock())
    check_auth(
        auth,
        [FrozenEvent(d) for d in js["auth_chain"]],
        [FrozenEvent(d) for d in js["pdus"]],
    )
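The script expects a single JSON object with "auth_chain" and "pdus" arrays of raw event dicts, read from stdin or a file argument. A hedged driver sketch, where events.json is a hypothetical dump you have captured (for example from a federation state response):

import subprocess

# events.json holds {"auth_chain": [...], "pdus": [...]} as raw event dicts.
subprocess.check_call(["python", "scripts/check_auth.py", "events.json"])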
scripts/copyrighter-sql.pl (Executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/usr/bin/perl -pi
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

$copyright = <<EOT;
/* Copyright 2015 OpenMarket Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
EOT

s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
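Because of the -pi shebang, the script edits its arguments in place, prepending the C-style licence block while preserving a leading coding line if one exists. A sketch of sweeping it over a set of SQL files; the glob pattern is a hypothetical example, not a path the script mandates:

import glob
import subprocess

# Hypothetical sweep: prepend the licence header to every matching .sql file.
files = glob.glob("synapse/storage/schema/*.sql")
if files:
    subprocess.check_call(["perl", "scripts/copyrighter-sql.pl"] + files)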
@@ -97,8 +97,11 @@ def lookup(destination, path):
     if ":" in destination:
         return "https://%s%s" % (destination, path)
     else:
-        srv = srvlookup.lookup("matrix", "tcp", destination)[0]
-        return "https://%s:%d%s" % (srv.host, srv.port, path)
+        try:
+            srv = srvlookup.lookup("matrix", "tcp", destination)[0]
+            return "https://%s:%d%s" % (srv.host, srv.port, path)
+        except:
+            return "https://%s:%d%s" % (destination, 8448, path)

 def get_json(origin_name, origin_key, destination, path):
     request_json = {
scripts/make_identicons.pl (Executable file, 39 lines)
@@ -0,0 +1,39 @@
#!/usr/bin/env perl

use strict;
use warnings;

use DBI;
use DBD::SQLite;
use JSON;
use Getopt::Long;

my $db; # = "homeserver.db";
my $server = "http://localhost:8008";
my $size = 320;

GetOptions("db|d=s",     \$db,
           "server|s=s", \$server,
           "width|w=i",  \$size) or usage();

usage() unless $db;

my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;

my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;

foreach (@$res) {
    my ($token, $mxid) = ($_->[0], $_->[1]);
    my ($user_id) = ($mxid =~ m/@(.*):/);
    my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
    if (!$url || $url =~ /#auto$/) {
        `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
        my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
        my $content_uri = from_json($json)->{content_uri};
        `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
    }
}

sub usage {
    die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
}
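The identicon endpoint the script curls can be exercised directly: the server derives a deterministic image from whatever name string it is given. A sketch assuming a homeserver on localhost; the Matrix ID is a placeholder seed:

import urllib
import urllib2

mxid = "@alice:example.com"  # placeholder: any string works as the seed
url = ("http://localhost:8008/_matrix/media/v1/identicon"
       "?name=%s&width=320&height=320" % urllib.quote(mxid))

with open("identicon.png", "wb") as f:
    f.write(urllib2.urlopen(url).read())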
@@ -8,3 +8,11 @@ test = trial

 [trial]
 test_suite = tests
+
+[check-manifest]
+ignore =
+    contrib
+    contrib/*
+    docs/*
+    pylint.cfg
+    tox.ini
setup.py (61 lines)
@@ -18,49 +18,42 @@ import os
 from setuptools import setup, find_packages


-# Utility function to read the README file.
-# Used for the long_description. It's nice, because now 1) we have a top level
-# README file and 2) it's easier to type in the README file than to put a raw
-# string in below ...
-def read(fname):
-    return open(os.path.join(os.path.dirname(__file__), fname)).read()
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+def read_file(path_segments):
+    """Read a file from the package. Takes a list of strings to join to
+    make the path"""
+    file_path = os.path.join(here, *path_segments)
+    with open(file_path) as f:
+        return f.read()
+
+
+def exec_file(path_segments):
+    """Execute a single python file to get the variables defined in it"""
+    result = {}
+    code = read_file(path_segments)
+    exec(code, result)
+    return result
+
+version = exec_file(("synapse", "__init__.py"))["__version__"]
+dependencies = exec_file(("synapse", "python_dependencies.py"))
+long_description = read_file(("README.rst",))

 setup(
     name="matrix-synapse",
-    version=read("VERSION").strip(),
+    version=version,
     packages=find_packages(exclude=["tests", "tests.*"]),
     description="Reference Synapse Home Server",
-    install_requires=[
-        "syutil==0.0.2",
-        "matrix_angular_sdk==0.6.0",
-        "Twisted>=14.0.0",
-        "service_identity>=1.0.0",
-        "pyopenssl>=0.14",
-        "pyyaml",
-        "pyasn1",
-        "pynacl",
-        "daemonize",
-        "py-bcrypt",
-        "frozendict>=0.4",
-        "pillow",
-    ],
-    dependency_links=[
-        "https://github.com/matrix-org/syutil/tarball/v0.0.2#egg=syutil-0.0.2",
-        "https://github.com/pyca/pynacl/tarball/d4d3175589b892f6ea7c22f466e0e223853516fa#egg=pynacl-0.3.0",
-        "https://github.com/matrix-org/matrix-angular-sdk/tarball/v0.6.0/#egg=matrix_angular_sdk-0.6.0",
-    ],
+    install_requires=dependencies['requirements'](include_conditional=True).keys(),
     setup_requires=[
         "Twisted==14.0.2",  # Here to override setuptools_trial's dependency on Twisted>=2.4.0
         "setuptools_trial",
         "setuptools>=1.0.0",  # Needs setuptools that supports git+ssh.
                               # TODO: Do we need this now? we don't use git+ssh.
         "mock"
     ],
+    dependency_links=dependencies["DEPENDENCY_LINKS"],
     include_package_data=True,
     zip_safe=False,
-    long_description=read("README.rst"),
-    entry_points="""
-    [console_scripts]
-    synctl=synapse.app.synctl:main
-    synapse-homeserver=synapse.app.homeserver:main
-    """
+    long_description=long_description,
+    scripts=["synctl", "register_new_matrix_user"],
 )
static/client/register/index.html (Normal file, 32 lines)
@@ -0,0 +1,32 @@
<html>
<head>
<title> Registration </title>
<meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
<link rel="stylesheet" href="style.css">
<script src="js/jquery-2.1.3.min.js"></script>
<script src="js/recaptcha_ajax.js"></script>
<script src="register_config.js"></script>
<script src="js/register.js"></script>
</head>
<body onload="matrixRegistration.onLoad()">
<form id="registrationForm" onsubmit="matrixRegistration.signUp(); return false;">
    <div>
        Create account:<br/>

        <div style="text-align: center">
            <input id="desired_user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
            <br/>
            <input id="pwd1" size="32" type="password" placeholder="Type a password"/>
            <br/>
            <input id="pwd2" size="32" type="password" placeholder="Confirm your password"/>
            <br/>
            <span id="feedback" style="color: #f00"></span>
            <br/>
            <div id="regcaptcha"></div>

            <button type="submit" style="margin: 10px">Sign up</button>
        </div>
    </div>
</form>
</body>
</html>
static/client/register/js/jquery-2.1.3.min.js (vendored, Normal file, 4 lines)
File diff suppressed because one or more lines are too long

static/client/register/js/recaptcha_ajax.js (Normal file, 195 lines)
File diff suppressed because one or more lines are too long
static/client/register/js/register.js (Normal file, 117 lines)
@@ -0,0 +1,117 @@
window.matrixRegistration = {
    endpoint: location.origin + "/_matrix/client/api/v1/register"
};

var setupCaptcha = function() {
    if (!window.matrixRegistrationConfig) {
        return;
    }
    $.get(matrixRegistration.endpoint, function(response) {
        var serverExpectsCaptcha = false;
        for (var i=0; i<response.flows.length; i++) {
            var flow = response.flows[i];
            if ("m.login.recaptcha" === flow.type) {
                serverExpectsCaptcha = true;
                break;
            }
        }
        if (!serverExpectsCaptcha) {
            console.log("This server does not require a captcha.");
            return;
        }
        console.log("Setting up ReCaptcha for "+matrixRegistration.endpoint);
        var public_key = window.matrixRegistrationConfig.recaptcha_public_key;
        if (public_key === undefined) {
            console.error("No public key defined for captcha!");
            setFeedbackString("Misconfigured captcha for server. Contact server admin.");
            return;
        }
        Recaptcha.create(public_key,
        "regcaptcha",
        {
            theme: "red",
            callback: Recaptcha.focus_response_field
        });
        window.matrixRegistration.isUsingRecaptcha = true;
    }).error(errorFunc);
};

var submitCaptcha = function(user, pwd) {
    var challengeToken = Recaptcha.get_challenge();
    var captchaEntry = Recaptcha.get_response();
    var data = {
        type: "m.login.recaptcha",
        challenge: challengeToken,
        response: captchaEntry
    };
    console.log("Submitting captcha");
    $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
        console.log("Success -> "+JSON.stringify(response));
        submitPassword(user, pwd, response.session);
    }).error(function(err) {
        Recaptcha.reload();
        errorFunc(err);
    });
};

var submitPassword = function(user, pwd, session) {
    console.log("Registering...");
    var data = {
        type: "m.login.password",
        user: user,
        password: pwd,
        session: session
    };
    $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
        matrixRegistration.onRegistered(
            response.home_server, response.user_id, response.access_token
        );
    }).error(errorFunc);
};

var errorFunc = function(err) {
    if (err.responseJSON && err.responseJSON.error) {
        setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")");
    }
    else {
        setFeedbackString("Request failed: " + err.status);
    }
};

var setFeedbackString = function(text) {
    $("#feedback").text(text);
};

matrixRegistration.onLoad = function() {
    setupCaptcha();
};

matrixRegistration.signUp = function() {
    var user = $("#desired_user_id").val();
    if (user.length == 0) {
        setFeedbackString("Must specify a username.");
        return;
    }
    var pwd1 = $("#pwd1").val();
    var pwd2 = $("#pwd2").val();
    if (pwd1.length < 6) {
        setFeedbackString("Password: min. 6 characters.");
        return;
    }
    if (pwd1 != pwd2) {
        setFeedbackString("Passwords do not match.");
        return;
    }
    if (window.matrixRegistration.isUsingRecaptcha) {
        submitCaptcha(user, pwd1);
    }
    else {
        submitPassword(user, pwd1);
    }
};

matrixRegistration.onRegistered = function(hs_url, user_id, access_token) {
    // clobber this function
    console.log("onRegistered - This function should be replaced to proceed.");
};
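register.js drives the staged v1 /register flow: GET the endpoint to learn the advertised flows, complete m.login.recaptcha if it is offered, then finish with m.login.password carrying the returned session. A sketch of the password-only leg in Python 2, assuming open registration with no captcha stage; the user and password values are placeholders:

import json
import urllib2

endpoint = "http://localhost:8008/_matrix/client/api/v1/register"

body = json.dumps({
    "type": "m.login.password",
    "user": "alice",          # placeholder localpart
    "password": "p4ssw0rd!",  # placeholder password
})
resp = json.load(urllib2.urlopen(urllib2.Request(endpoint, data=body)))
print "Registered %s on %s" % (resp["user_id"], resp["home_server"])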
static/client/register/register_config.sample.js (Normal file, 3 lines)
@@ -0,0 +1,3 @@
window.matrixRegistrationConfig = {
    recaptcha_public_key: "YOUR_PUBLIC_KEY"
};
static/client/register/style.css (Normal file, 56 lines)
@@ -0,0 +1,56 @@
html {
    height: 100%;
}

body {
    height: 100%;
    font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif;
    font-size: 12pt;
    margin: 0px;
}

h1 {
    font-size: 20pt;
}

a:link    { color: #666; }
a:visited { color: #666; }
a:hover   { color: #000; }
a:active  { color: #000; }

input {
    width: 100%
}

textarea, input {
    font-family: inherit;
    font-size: inherit;
}

.smallPrint {
    color: #888;
    font-size: 9pt ! important;
    font-style: italic ! important;
}

#recaptcha_area {
    margin: auto
}

#registrationForm {
    text-align: left;
    padding: 1em;
    margin-bottom: 40px;
    display: inline-block;

    -webkit-border-radius: 10px;
    -moz-border-radius: 10px;
    border-radius: 10px;

    -webkit-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
    -moz-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
    box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);

    background-color: #f8f8f8;
    border: 1px #ccc solid;
}
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-""" This is a reference implementation of a synapse home server.
+""" This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.6.1a"
+__version__ = "0.8.1-r2"
@@ -21,12 +21,19 @@ from synapse.api.constants import EventTypes, Membership, JoinRules
 from synapse.api.errors import AuthError, StoreError, Codes, SynapseError
 from synapse.util.logutils import log_function
+from synapse.util.async import run_on_reactor
+from synapse.types import UserID, ClientInfo

 import logging

 logger = logging.getLogger(__name__)


+AuthEventTypes = (
+    EventTypes.Create, EventTypes.Member, EventTypes.PowerLevels,
+    EventTypes.JoinRules,
+)
+

 class Auth(object):

     def __init__(self, hs):
@@ -88,12 +95,19 @@ class Auth(object):
             raise

     @defer.inlineCallbacks
-    def check_joined_room(self, room_id, user_id):
-        member = yield self.state.get_current_state(
-            room_id=room_id,
-            event_type=EventTypes.Member,
-            state_key=user_id
-        )
+    def check_joined_room(self, room_id, user_id, current_state=None):
+        if current_state:
+            member = current_state.get(
+                (EventTypes.Member, user_id),
+                None
+            )
+        else:
+            member = yield self.state.get_current_state(
+                room_id=room_id,
+                event_type=EventTypes.Member,
+                state_key=user_id
+            )

         self._check_joined_room(member, user_id, room_id)
         defer.returnValue(member)

@@ -101,10 +115,10 @@ class Auth(object):
     def check_host_in_room(self, room_id, host):
         curr_state = yield self.state.get_current_state(room_id)

-        for event in curr_state:
+        for event in curr_state.values():
             if event.type == EventTypes.Member:
                 try:
-                    if self.hs.parse_userid(event.state_key).domain != host:
+                    if UserID.from_string(event.state_key).domain != host:
                         continue
                 except:
                     logger.warn("state_key not user_id: %s", event.state_key)
@@ -158,6 +172,7 @@ class Auth(object):
         target = auth_events.get(key)

         target_in_room = target and target.membership == Membership.JOIN
+        target_banned = target and target.membership == Membership.BAN

         key = (EventTypes.JoinRules, "", )
         join_rule_event = auth_events.get(key)
@@ -186,6 +201,7 @@ class Auth(object):
             {
                 "caller_in_room": caller_in_room,
                 "caller_invited": caller_invited,
+                "target_banned": target_banned,
                 "target_in_room": target_in_room,
                 "membership": membership,
                 "join_rule": join_rule,
@@ -194,6 +210,11 @@ class Auth(object):
             }
         )

+        if ban_level:
+            ban_level = int(ban_level)
+        else:
+            ban_level = 50  # FIXME (erikj): What should we do here?
+
         if Membership.INVITE == membership:
             # TODO (erikj): We should probably handle this more intelligently
             # PRIVATE join rules.
@@ -204,6 +225,10 @@ class Auth(object):
                     403,
                     "%s not in room %s." % (event.user_id, event.room_id,)
                 )
+            elif target_banned:
+                raise AuthError(
+                    403, "%s is banned from the room" % (target_user_id,)
+                )
             elif target_in_room:  # the target is already in the room.
                 raise AuthError(403, "%s is already in the room." %
                                      target_user_id)
@@ -213,6 +238,8 @@ class Auth(object):
                 # joined: It's a NOOP
                 if event.user_id != target_user_id:
                     raise AuthError(403, "Cannot force another user to join.")
+            elif target_banned:
+                raise AuthError(403, "You are banned from this room")
             elif join_rule == JoinRules.PUBLIC:
                 pass
             elif join_rule == JoinRules.INVITE:
@@ -230,6 +257,10 @@ class Auth(object):
                     403,
                     "%s not in room %s." % (target_user_id, event.room_id,)
                 )
+            elif target_banned and user_level < ban_level:
+                raise AuthError(
+                    403, "You cannot unban user %s." % (target_user_id,)
+                )
             elif target_user_id != event.user_id:
                 if kick_level:
                     kick_level = int(kick_level)
@@ -241,11 +272,6 @@ class Auth(object):
                     403, "You cannot kick user %s." % target_user_id
                 )
         elif Membership.BAN == membership:
-            if ban_level:
-                ban_level = int(ban_level)
-            else:
-                ban_level = 50  # FIXME (erikj): What should we do here?
-
             if user_level < ban_level:
                 raise AuthError(403, "You don't have permission to ban")
         else:
@@ -289,15 +315,47 @@ class Auth(object):
         Args:
             request - An HTTP request with an access_token query parameter.
         Returns:
-            UserID : User ID object of the user making the request
+            tuple : of UserID and device string:
+                User ID object of the user making the request
+                Client ID object of the client instance the user is using
         Raises:
             AuthError if no user by that token exists or the token is invalid.
         """
         # Can optionally look elsewhere in the request (e.g. headers)
         try:
             access_token = request.args["access_token"][0]
+
+            # Check for application service tokens with a user_id override
+            try:
+                app_service = yield self.store.get_app_service_by_token(
+                    access_token
+                )
+                if not app_service:
+                    raise KeyError

+                user_id = app_service.sender
+                if "user_id" in request.args:
+                    user_id = request.args["user_id"][0]
+                    if not app_service.is_interested_in_user(user_id):
+                        raise AuthError(
+                            403,
+                            "Application service cannot masquerade as this user."
+                        )
+
+                if not user_id:
+                    raise KeyError
+
+                defer.returnValue(
+                    (UserID.from_string(user_id), ClientInfo("", ""))
+                )
+                return
+            except KeyError:
+                pass  # normal users won't have this query parameter set
+
             user_info = yield self.get_user_by_token(access_token)
             user = user_info["user"]
+            device_id = user_info["device_id"]
+            token_id = user_info["token_id"]

             ip_addr = self.hs.get_ip_from_request(request)
             user_agent = request.requestHeaders.getRawHeaders(
@@ -313,7 +371,7 @@ class Auth(object):
                 user_agent=user_agent
             )

-            defer.returnValue(user)
+            defer.returnValue((user, ClientInfo(device_id, token_id)))
         except KeyError:
             raise AuthError(403, "Missing access token.")

@@ -330,14 +388,14 @@ class Auth(object):
             AuthError if no user by that token exists or the token is invalid.
         """
         try:
-            ret = yield self.store.get_user_by_token(token=token)
+            ret = yield self.store.get_user_by_token(token)
             if not ret:
-                raise StoreError()
-
+                raise StoreError(400, "Unknown token")
             user_info = {
                 "admin": bool(ret.get("admin", False)),
                 "device_id": ret.get("device_id"),
-                "user": self.hs.parse_userid(ret.get("name")),
+                "user": UserID.from_string(ret.get("name")),
+                "token_id": ret.get("token_id", None),
             }

             defer.returnValue(user_info)
@@ -345,6 +403,18 @@ class Auth(object):
             raise AuthError(403, "Unrecognised access token.",
                             errcode=Codes.UNKNOWN_TOKEN)

+    @defer.inlineCallbacks
+    def get_appservice_by_req(self, request):
+        try:
+            token = request.args["access_token"][0]
+            service = yield self.store.get_app_service_by_token(token)
+            if not service:
+                raise AuthError(403, "Unrecognised access token.",
+                                errcode=Codes.UNKNOWN_TOKEN)
+            defer.returnValue(service)
+        except KeyError:
+            raise AuthError(403, "Missing access token.")
+
     def is_server_admin(self, user):
         return self.store.is_server_admin(user)

@@ -352,26 +422,34 @@ class Auth(object):
     def add_auth_events(self, builder, context):
         yield run_on_reactor()

-        if builder.type == EventTypes.Create:
-            builder.auth_events = []
-            return
+        auth_ids = self.compute_auth_events(builder, context.current_state)
+
+        auth_events_entries = yield self.store.add_event_hashes(
+            auth_ids
+        )
+
+        builder.auth_events = auth_events_entries
+
+    def compute_auth_events(self, event, current_state):
+        if event.type == EventTypes.Create:
+            return []

         auth_ids = []

         key = (EventTypes.PowerLevels, "", )
-        power_level_event = context.current_state.get(key)
+        power_level_event = current_state.get(key)

         if power_level_event:
             auth_ids.append(power_level_event.event_id)

         key = (EventTypes.JoinRules, "", )
-        join_rule_event = context.current_state.get(key)
+        join_rule_event = current_state.get(key)

-        key = (EventTypes.Member, builder.user_id, )
-        member_event = context.current_state.get(key)
+        key = (EventTypes.Member, event.user_id, )
+        member_event = current_state.get(key)

         key = (EventTypes.Create, "", )
-        create_event = context.current_state.get(key)
+        create_event = current_state.get(key)
         if create_event:
             auth_ids.append(create_event.event_id)
@@ -381,8 +459,8 @@ class Auth(object):
         else:
             is_public = False

-        if builder.type == EventTypes.Member:
-            e_type = builder.content["membership"]
+        if event.type == EventTypes.Member:
+            e_type = event.content["membership"]
             if e_type in [Membership.JOIN, Membership.INVITE]:
                 if join_rule_event:
                     auth_ids.append(join_rule_event.event_id)
@@ -397,17 +475,7 @@ class Auth(object):
             if member_event.content["membership"] == Membership.JOIN:
                 auth_ids.append(member_event.event_id)

-        auth_events_entries = yield self.store.add_event_hashes(
-            auth_ids
-        )
-
-        builder.auth_events = auth_events_entries
-
-        context.auth_events = {
-            k: v
-            for k, v in context.current_state.items()
-            if v.event_id in auth_ids
-        }
+        return auth_ids

     @log_function
     def _can_send_event(self, event, auth_events):
@@ -461,7 +529,7 @@ class Auth(object):
                     "You are not allowed to set others state"
                 )
             else:
-                sender_domain = self.hs.parse_userid(
+                sender_domain = UserID.from_string(
                     event.user_id
                 ).domain

@@ -496,7 +564,7 @@ class Auth(object):
         # Validate users
         for k, v in user_list.items():
             try:
-                self.hs.parse_userid(k)
+                UserID.from_string(k)
             except:
                 raise SynapseError(400, "Not a valid user_id: %s" % (k,))
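The application-service branch added to get_user_by_req lets a registered AS act as any user inside its namespace by passing a user_id query parameter alongside its own token. A sketch of such a masqueraded call; both the AS token and the bridged user are placeholders:

import json
import urllib
import urllib2

qs = urllib.urlencode({
    "access_token": "as_token_abc123",        # the AS token, not a user token
    "user_id": "@_bridge_alice:example.com",  # must be in the AS user namespace
})
req = urllib2.Request(
    "http://localhost:8008/_matrix/client/api/v1/createRoom?" + qs,
    data=json.dumps({}),
    headers={"Content-Type": "application/json"},
)
# The room is created as @_bridge_alice, not as the AS sender.
urllib2.urlopen(req)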
@@ -59,6 +59,8 @@ class LoginType(object):
     EMAIL_URL = u"m.login.email.url"
     EMAIL_IDENTITY = u"m.login.email.identity"
     RECAPTCHA = u"m.login.recaptcha"
+    APPLICATION_SERVICE = u"m.login.application_service"
+    SHARED_SECRET = u"org.matrix.login.shared_secret"


 class EventTypes(object):
@@ -74,3 +76,9 @@ class EventTypes(object):
     Message = "m.room.message"
     Topic = "m.room.topic"
     Name = "m.room.name"
+
+
+class RejectedReason(object):
+    AUTH_ERROR = "auth_error"
+    REPLACED = "replaced"
+    NOT_ANCESTOR = "not_ancestor"
@@ -21,6 +21,7 @@ logger = logging.getLogger(__name__)


 class Codes(object):
+    UNRECOGNIZED = "M_UNRECOGNIZED"
     UNAUTHORIZED = "M_UNAUTHORIZED"
     FORBIDDEN = "M_FORBIDDEN"
     BAD_JSON = "M_BAD_JSON"
@@ -34,10 +35,12 @@ class Codes(object):
     LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
     CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
     CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
-    TOO_LARGE = "M_TOO_LARGE"
+    MISSING_PARAM = "M_MISSING_PARAM"
+    TOO_LARGE = "M_TOO_LARGE"
+    EXCLUSIVE = "M_EXCLUSIVE"


-class CodeMessageException(Exception):
+class CodeMessageException(RuntimeError):
     """An exception with integer code and message string attributes."""

     def __init__(self, code, msg):
@@ -81,6 +84,35 @@ class RegistrationError(SynapseError):
     pass


+class UnrecognizedRequestError(SynapseError):
+    """An error indicating we don't understand the request you're trying to make"""
+    def __init__(self, *args, **kwargs):
+        if "errcode" not in kwargs:
+            kwargs["errcode"] = Codes.UNRECOGNIZED
+        message = None
+        if len(args) == 0:
+            message = "Unrecognized request"
+        else:
+            message = args[0]
+        super(UnrecognizedRequestError, self).__init__(
+            400,
+            message,
+            **kwargs
+        )
+
+
+class NotFoundError(SynapseError):
+    """An error indicating we can't find the thing you asked for"""
+    def __init__(self, *args, **kwargs):
+        if "errcode" not in kwargs:
+            kwargs["errcode"] = Codes.NOT_FOUND
+        super(NotFoundError, self).__init__(
+            404,
+            "Not found",
+            **kwargs
+        )
+
+
 class AuthError(SynapseError):
     """An error raised when there was a problem authorising an event."""

@@ -196,3 +228,9 @@ class FederationError(RuntimeError):
             "affected": self.affected,
             "source": self.source if self.source else self.affected,
         }
+
+
+class HttpResponseException(CodeMessageException):
+    def __init__(self, code, msg, response):
+        self.response = response
+        super(HttpResponseException, self).__init__(code, msg)
synapse/api/filtering.py (Normal file, 229 lines)
@@ -0,0 +1,229 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import SynapseError
from synapse.types import UserID, RoomID


class Filtering(object):

    def __init__(self, hs):
        super(Filtering, self).__init__()
        self.store = hs.get_datastore()

    def get_user_filter(self, user_localpart, filter_id):
        result = self.store.get_user_filter(user_localpart, filter_id)
        result.addCallback(Filter)
        return result

    def add_user_filter(self, user_localpart, user_filter):
        self._check_valid_filter(user_filter)
        return self.store.add_user_filter(user_localpart, user_filter)

    # TODO(paul): surely we should probably add a delete_user_filter or
    #   replace_user_filter at some point? There's no REST API specified for
    #   them however

    def _check_valid_filter(self, user_filter_json):
        """Check if the provided filter is valid.

        This inspects all definitions contained within the filter.

        Args:
            user_filter_json(dict): The filter
        Raises:
            SynapseError: If the filter is not valid.
        """
        # NB: Filters are the complete json blobs. "Definitions" are an
        # individual top-level key e.g. public_user_data. Filters are made of
        # many definitions.

        top_level_definitions = [
            "public_user_data", "private_user_data", "server_data"
        ]

        room_level_definitions = [
            "state", "events", "ephemeral"
        ]

        for key in top_level_definitions:
            if key in user_filter_json:
                self._check_definition(user_filter_json[key])

        if "room" in user_filter_json:
            for key in room_level_definitions:
                if key in user_filter_json["room"]:
                    self._check_definition(user_filter_json["room"][key])

    def _check_definition(self, definition):
        """Check if the provided definition is valid.

        This inspects not only the types but also the values to make sure they
        make sense.

        Args:
            definition(dict): The filter definition
        Raises:
            SynapseError: If there was a problem with this definition.
        """
        # NB: Filters are the complete json blobs. "Definitions" are an
        # individual top-level key e.g. public_user_data. Filters are made of
        # many definitions.
        if type(definition) != dict:
            raise SynapseError(
                400, "Expected JSON object, not %s" % (definition,)
            )

        # check rooms are valid room IDs
        room_id_keys = ["rooms", "not_rooms"]
        for key in room_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for room_id in definition[key]:
                    RoomID.from_string(room_id)

        # check senders are valid user IDs
        user_id_keys = ["senders", "not_senders"]
        for key in user_id_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for user_id in definition[key]:
                    UserID.from_string(user_id)

        # TODO: We don't limit event type values but we probably should...
        # check types are valid event types
        event_keys = ["types", "not_types"]
        for key in event_keys:
            if key in definition:
                if type(definition[key]) != list:
                    raise SynapseError(400, "Expected %s to be a list." % key)
                for event_type in definition[key]:
                    if not isinstance(event_type, basestring):
                        raise SynapseError(400, "Event type should be a string")

        if "format" in definition:
            event_format = definition["format"]
            if event_format not in ["federation", "events"]:
                raise SynapseError(400, "Invalid format: %s" % (event_format,))

        if "select" in definition:
            event_select_list = definition["select"]
            for select_key in event_select_list:
                if select_key not in ["event_id", "origin_server_ts",
                                      "thread_id", "content", "content.body"]:
                    raise SynapseError(400, "Bad select: %s" % (select_key,))

        if ("bundle_updates" in definition and
                type(definition["bundle_updates"]) != bool):
            raise SynapseError(400, "Bad bundle_updates: expected bool.")


class Filter(object):
    def __init__(self, filter_json):
        self.filter_json = filter_json

    def filter_public_user_data(self, events):
        return self._filter_on_key(events, ["public_user_data"])

    def filter_private_user_data(self, events):
        return self._filter_on_key(events, ["private_user_data"])

    def filter_room_state(self, events):
        return self._filter_on_key(events, ["room", "state"])

    def filter_room_events(self, events):
        return self._filter_on_key(events, ["room", "events"])

    def filter_room_ephemeral(self, events):
        return self._filter_on_key(events, ["room", "ephemeral"])

    def _filter_on_key(self, events, keys):
        filter_json = self.filter_json
        if not filter_json:
            return events

        try:
            # extract the right definition from the filter
            definition = filter_json
            for key in keys:
                definition = definition[key]
            return self._filter_with_definition(events, definition)
        except KeyError:
            # return all events if definition isn't specified.
            return events

    def _filter_with_definition(self, events, definition):
        return [e for e in events if self._passes_definition(definition, e)]

    def _passes_definition(self, definition, event):
        """Check if the event passes through the given definition.

        Args:
            definition(dict): The definition to check against.
            event(Event): The event to check.
        Returns:
            True if the event passes through the filter.
        """
        # Algorithm notes:
        # For each key in the definition, check the event meets the criteria:
        #   * For types: Literal match or prefix match (if ends with wildcard)
        #   * For senders/rooms: Literal match only
        #   * "not_" checks take precedence (e.g. if "m.*" is in both 'types'
        #     and 'not_types' then it is treated as only being in 'not_types')

        # room checks
        if hasattr(event, "room_id"):
            room_id = event.room_id
            allow_rooms = definition.get("rooms", None)
            reject_rooms = definition.get("not_rooms", None)
            if reject_rooms and room_id in reject_rooms:
                return False
            if allow_rooms and room_id not in allow_rooms:
                return False

        # sender checks
        if hasattr(event, "sender"):
            # Should we be including event.state_key for some event types?
            sender = event.sender
            allow_senders = definition.get("senders", None)
            reject_senders = definition.get("not_senders", None)
            if reject_senders and sender in reject_senders:
                return False
            if allow_senders and sender not in allow_senders:
                return False

        # type checks
        if "not_types" in definition:
            for def_type in definition["not_types"]:
                if self._event_matches_type(event, def_type):
                    return False
        if "types" in definition:
            included = False
            for def_type in definition["types"]:
                if self._event_matches_type(event, def_type):
                    included = True
                    break
            if not included:
                return False

        return True

    def _event_matches_type(self, event, def_type):
        if def_type.endswith("*"):
            type_prefix = def_type[:-1]
            return event.type.startswith(type_prefix)
        else:
            return event.type == def_type
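A sketch of the Filter class in use, assuming it is importable from the module above; the definition is shaped the way _check_definition validates it. The room ID and sender are placeholders; a trailing * in a type gives a prefix match, and not_* lists take precedence over their positive counterparts:

filter_json = {
    "room": {
        "events": {
            "rooms": ["!abc123:localhost"],       # placeholder room ID
            "types": ["m.room.*"],                # prefix match on event type
            "not_senders": ["@spam:localhost"],   # placeholder sender to drop
        }
    }
}

f = Filter(filter_json)
# events = f.filter_room_events(events)  # keeps only events passing the definition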
@@ -16,8 +16,11 @@
 """Contains the URL paths to prefix various aspects of the server with. """

 CLIENT_PREFIX = "/_matrix/client/api/v1"
+CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
 FEDERATION_PREFIX = "/_matrix/federation/v1"
+STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
 SERVER_KEY_PREFIX = "/_matrix/key/v1"
 MEDIA_PREFIX = "/_matrix/media/v1"
+APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
@@ -14,37 +14,52 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from synapse.storage import prepare_database, UpgradeDatabaseException
+import sys
+sys.dont_write_bytecode = True
+
+from synapse.storage import (
+    prepare_database, prepare_sqlite3_database, UpgradeDatabaseException,
+)

 from synapse.server import HomeServer

+from synapse.python_dependencies import check_requirements
+
 from twisted.internet import reactor
+from twisted.application import service
 from twisted.enterprise import adbapi
 from twisted.web.resource import Resource
 from twisted.web.static import File
 from twisted.web.server import Site
 from synapse.http.server import JsonResource, RootRedirect
-from synapse.media.v0.content_repository import ContentRepoResource
-from synapse.media.v1.media_repository import MediaRepositoryResource
+from synapse.rest.appservice.v1 import AppServiceRestResource
+from synapse.rest.media.v0.content_repository import ContentRepoResource
+from synapse.rest.media.v1.media_repository import MediaRepositoryResource
 from synapse.http.server_key_resource import LocalKey
 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.api.urls import (
     CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
-    SERVER_KEY_PREFIX, MEDIA_PREFIX
+    SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, APP_SERVICE_PREFIX,
+    STATIC_PREFIX
 )
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
 from synapse.util.logcontext import LoggingContext
+from synapse.rest.client.v1 import ClientV1RestResource
+from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
+from synapse.metrics.resource import MetricsResource, METRICS_PREFIX

 from daemonize import Daemonize
 import twisted.manhole.telnet

 import synapse

 import logging
 import os
 import re
-import sys
+import resource
+import subprocess
 import sqlite3
-import syweb

 logger = logging.getLogger(__name__)
@@ -55,16 +70,26 @@ class SynapseHomeServer(HomeServer):
         return MatrixFederationHttpClient(self)

     def build_resource_for_client(self):
-        return JsonResource()
+        return ClientV1RestResource(self)
+
+    def build_resource_for_client_v2_alpha(self):
+        return ClientV2AlphaRestResource(self)

     def build_resource_for_federation(self):
-        return JsonResource()
+        return JsonResource(self)
+
+    def build_resource_for_app_services(self):
+        return AppServiceRestResource(self)

     def build_resource_for_web_client(self):
+        import syweb
         syweb_path = os.path.dirname(syweb.__file__)
         webclient_path = os.path.join(syweb_path, "webclient")
         return File(webclient_path)  # TODO configurable?

+    def build_resource_for_static_content(self):
+        return File("static")
+
     def build_resource_for_content_repo(self):
         return ContentRepoResource(
             self, self.upload_dir, self.auth, self.content_addr
@@ -76,15 +101,23 @@ class SynapseHomeServer(HomeServer):
     def build_resource_for_server_key(self):
         return LocalKey(self)

+    def build_resource_for_metrics(self):
+        if self.get_config().enable_metrics:
+            return MetricsResource(self)
+        else:
+            return None
+
     def build_db_pool(self):
         return adbapi.ConnectionPool(
             "sqlite3", self.get_db_name(),
             check_same_thread=False,
             cp_min=1,
-            cp_max=1
+            cp_max=1,
+            cp_openfun=prepare_database,  # Prepare the database for each conn
+                                          # so that :memory: sqlite works
         )

-    def create_resource_tree(self, web_client, redirect_root_to_web_client):
+    def create_resource_tree(self, redirect_root_to_web_client):
         """Create the resource tree for this Home Server.

         This is unduly complicated because Twisted does not support putting
@@ -96,15 +129,22 @@ class SynapseHomeServer(HomeServer):
         location of the web client. This does nothing if web_client is not
         True.
         """
+        config = self.get_config()
+        web_client = config.web_client
+
         # list containing (path_str, Resource) e.g:
         # [ ("/aaa/bbb/cc", Resource1), ("/aaa/dummy", Resource2) ]
         desired_tree = [
             (CLIENT_PREFIX, self.get_resource_for_client()),
+            (CLIENT_V2_ALPHA_PREFIX, self.get_resource_for_client_v2_alpha()),
             (FEDERATION_PREFIX, self.get_resource_for_federation()),
             (CONTENT_REPO_PREFIX, self.get_resource_for_content_repo()),
             (SERVER_KEY_PREFIX, self.get_resource_for_server_key()),
             (MEDIA_PREFIX, self.get_resource_for_media_repository()),
+            (APP_SERVICE_PREFIX, self.get_resource_for_app_services()),
+            (STATIC_PREFIX, self.get_resource_for_static_content()),
         ]

         if web_client:
             logger.info("Adding the web client.")
             desired_tree.append((WEB_CLIENT_PREFIX,
@@ -115,16 +155,20 @@ class SynapseHomeServer(HomeServer):
         else:
             self.root_resource = Resource()

+        metrics_resource = self.get_resource_for_metrics()
+        if config.metrics_port is None and metrics_resource is not None:
+            desired_tree.append((METRICS_PREFIX, metrics_resource))
+
         # ideally we'd just use getChild and putChild but getChild doesn't work
         # unless you give it a Request object IN ADDITION to the name :/ So
         # instead, we'll store a copy of this mapping so we can actually add
         # extra resources to existing nodes. See self._resource_id for the key.
         resource_mappings = {}
-        for (full_path, resource) in desired_tree:
-            logger.info("Attaching %s to path %s", resource, full_path)
+        for full_path, res in desired_tree:
+            logger.info("Attaching %s to path %s", res, full_path)
             last_resource = self.root_resource
             for path_seg in full_path.split('/')[1:-1]:
-                if not path_seg in last_resource.listNames():
+                if path_seg not in last_resource.listNames():
                     # resource doesn't exist, so make a "dummy resource"
                     child_resource = Resource()
                     last_resource.putChild(path_seg, child_resource)
@@ -152,12 +196,12 @@ class SynapseHomeServer(HomeServer):
                              child_name)
                 child_resource = resource_mappings[child_res_id]
                 # steal the children
-                resource.putChild(child_name, child_resource)
+                res.putChild(child_name, child_resource)

             # finally, insert the desired resource in the right place
-            last_resource.putChild(last_path_seg, resource)
+            last_resource.putChild(last_path_seg, res)
             res_id = self._resource_id(last_resource, last_path_seg)
-            resource_mappings[res_id] = resource
+            resource_mappings[res_id] = res

         return self.root_resource
@@ -176,29 +220,136 @@ class SynapseHomeServer(HomeServer):
         """
         return "%s-%s" % (resource, path_seg)

-    def start_listening(self, secure_port, unsecure_port):
-        if secure_port is not None:
+    def start_listening(self):
+        config = self.get_config()
+
+        if not config.no_tls and config.bind_port is not None:
             reactor.listenSSL(
-                secure_port, Site(self.root_resource), self.tls_context_factory
+                config.bind_port,
+                Site(self.root_resource),
+                self.tls_context_factory,
+                interface=config.bind_host
             )
-            logger.info("Synapse now listening on port %d", secure_port)
-        if unsecure_port is not None:
+            logger.info("Synapse now listening on port %d", config.bind_port)
+
+        if config.unsecure_port is not None:
             reactor.listenTCP(
-                unsecure_port, Site(self.root_resource)
+                config.unsecure_port,
+                Site(self.root_resource),
+                interface=config.bind_host
             )
-            logger.info("Synapse now listening on port %d", unsecure_port)
+            logger.info("Synapse now listening on port %d", config.unsecure_port)
+
+        metrics_resource = self.get_resource_for_metrics()
+        if metrics_resource and config.metrics_port is not None:
+            reactor.listenTCP(
+                config.metrics_port, Site(metrics_resource), interface="127.0.0.1",
+            )
+            logger.info("Metrics now running on 127.0.0.1 port %d", config.metrics_port)


-def setup():
+def get_version_string():
+    try:
+        null = open(os.devnull, 'w')
+        cwd = os.path.dirname(os.path.abspath(__file__))
+        try:
+            git_branch = subprocess.check_output(
+                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
+                stderr=null,
+                cwd=cwd,
+            ).strip()
+            git_branch = "b=" + git_branch
+        except subprocess.CalledProcessError:
+            git_branch = ""
+
+        try:
+            git_tag = subprocess.check_output(
+                ['git', 'describe', '--exact-match'],
+                stderr=null,
+                cwd=cwd,
+            ).strip()
+            git_tag = "t=" + git_tag
+        except subprocess.CalledProcessError:
+            git_tag = ""
+
+        try:
+            git_commit = subprocess.check_output(
+                ['git', 'rev-parse', '--short', 'HEAD'],
+                stderr=null,
+                cwd=cwd,
+            ).strip()
+        except subprocess.CalledProcessError:
+            git_commit = ""
+
+        try:
+            dirty_string = "-this_is_a_dirty_checkout"
+            is_dirty = subprocess.check_output(
+                ['git', 'describe', '--dirty=' + dirty_string],
+                stderr=null,
+                cwd=cwd,
+            ).strip().endswith(dirty_string)
+
+            git_dirty = "dirty" if is_dirty else ""
+        except subprocess.CalledProcessError:
+            git_dirty = ""
+
+        if git_branch or git_tag or git_commit or git_dirty:
+            git_version = ",".join(
+                s for s in
+                (git_branch, git_tag, git_commit, git_dirty,)
+                if s
+            )
+
+            return (
+                "Synapse/%s (%s)" % (
+                    synapse.__version__, git_version,
+                )
+            ).encode("ascii")
+    except Exception as e:
+        logger.warn("Failed to check for git repository: %s", e)
+
+    return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")
+
+
+def change_resource_limit(soft_file_no):
+    try:
+        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+        if not soft_file_no:
+            soft_file_no = hard
+
+        resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
+
+        logger.info("Set file limit to: %d", soft_file_no)
+    except (ValueError, resource.error) as e:
+        logger.warn("Failed to set file limit: %s", e)
+
+
+def setup(config_options):
+    """
+    Args:
+        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.
+        should_run (bool): Whether to start the reactor.
+
+    Returns:
+        HomeServer
+    """
     config = HomeServerConfig.load_config(
         "Synapse Homeserver",
-        sys.argv[1:],
+        config_options,
         generate_section="Homeserver"
     )

     config.setup_logging()

+    # check any extra requirements we have now we have a config
+    check_requirements(config)
+
+    version_string = get_version_string()
+
     logger.info("Server hostname: %s", config.server_name)
+    logger.info("Server version: %s", version_string)

     if re.search(":[0-9]+$", config.server_name):
         domain_with_port = config.server_name
@@ -215,12 +366,10 @@ def setup():
         tls_context_factory=tls_context_factory,
         config=config,
         content_addr=config.content_addr,
+        version_string=version_string,
     )

-    hs.register_servlets()
-
     hs.create_resource_tree(
-        web_client=config.webclient,
         redirect_root_to_web_client=True,
     )
@@ -230,18 +379,18 @@ def setup():
     try:
         with sqlite3.connect(db_name) as db_conn:
+            prepare_sqlite3_database(db_conn)
             prepare_database(db_conn)
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
-            "Have you checked for version specific instructions in UPGRADES.rst?\n"
+            "Have you checked for version specific instructions in"
+            " UPGRADES.rst?\n"
         )
         sys.exit(1)

     logger.info("Database prepared in %s.", db_name)

+    hs.get_db_pool()
+
     if config.manhole:
         f = twisted.manhole.telnet.ShellFactory()
         f.username = "matrix"
@@ -249,17 +398,47 @@ def setup():
         f.namespace['hs'] = hs
         reactor.listenTCP(config.manhole, f, interface='127.0.0.1')

-    bind_port = config.bind_port
-    if config.no_tls:
-        bind_port = None
-    hs.start_listening(bind_port, config.unsecure_port)
+    hs.start_listening()

+    hs.get_pusherpool().start()
     hs.get_state_handler().start_caching()
     hs.get_datastore().start_profiling()
+    hs.get_replication_layer().start_get_pdu_cache()
+
+    return hs
+
+
+class SynapseService(service.Service):
+    """A twisted Service class that will start synapse. Used to run synapse
+    via twistd and a .tac.
+    """
+    def __init__(self, config):
+        self.config = config
+
+    def startService(self):
+        hs = setup(self.config)
+        change_resource_limit(hs.config.soft_file_limit)
+
+    def stopService(self):
+        return self._port.stopListening()
+
+
+def run(hs):
+
+    def in_thread():
+        with LoggingContext("run"):
+            change_resource_limit(hs.config.soft_file_limit)
+
+            reactor.run()

-    if config.daemonize:
-        print config.pid_file
+    if hs.config.daemonize:
+
+        print hs.config.pid_file
+
         daemon = Daemonize(
             app="synapse-homeserver",
-            pid=config.pid_file,
-            action=run,
+            pid=hs.config.pid_file,
+            action=lambda: in_thread(),
             auto_close_fds=False,
             verbose=True,
             logger=logger,
@@ -267,17 +446,15 @@ def setup():

         daemon.start()
     else:
-        reactor.run()
-
-
-def run():
-    with LoggingContext("run"):
-        reactor.run()
+        in_thread()


 def main():
     with LoggingContext("main"):
-        setup()
+        # check base requirements
+        check_requirements()
+        hs = setup(sys.argv[1:])
+        run(hs)


 if __name__ == '__main__':
@@ -19,7 +19,7 @@ import os
|
||||
import subprocess
|
||||
import signal
|
||||
|
||||
SYNAPSE = ["python", "-m", "synapse.app.homeserver"]
|
||||
SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
|
||||
|
||||
CONFIGFILE = "homeserver.yaml"
|
||||
PIDFILE = "homeserver.pid"
|
||||
|
||||
synapse/appservice/__init__.py (new file, 176 lines)
@@ -0,0 +1,176 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes

import logging
import re

logger = logging.getLogger(__name__)


class ApplicationService(object):
    """Defines an application service. This definition is mostly what is
    provided to the /register AS API.

    Provides methods to check if this service is "interested" in events.
    """
    NS_USERS = "users"
    NS_ALIASES = "aliases"
    NS_ROOMS = "rooms"
    # The ordering here is important as it is used to map database values
    # (which are stored as ints representing the position in this list) to
    # namespace values.
    NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

    def __init__(self, token, url=None, namespaces=None, hs_token=None,
                 sender=None, txn_id=None):
        self.token = token
        self.url = url
        self.hs_token = hs_token
        self.sender = sender
        self.namespaces = self._check_namespaces(namespaces)
        self.txn_id = txn_id

    def _check_namespaces(self, namespaces):
        # Sanity check that it is of the form:
        # {
        #   users: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        #   aliases: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        #   rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        # }
        if not namespaces:
            return None

        for ns in ApplicationService.NS_LIST:
            if ns not in namespaces:
                namespaces[ns] = []
                continue

            if type(namespaces[ns]) != list:
                raise ValueError("Bad namespace value for '%s'" % ns)
            for regex_obj in namespaces[ns]:
                if not isinstance(regex_obj, dict):
                    raise ValueError("Expected dict regex for ns '%s'" % ns)
                if not isinstance(regex_obj.get("exclusive"), bool):
                    raise ValueError(
                        "Expected bool for 'exclusive' in ns '%s'" % ns
                    )
                if not isinstance(regex_obj.get("regex"), basestring):
                    raise ValueError(
                        "Expected string for 'regex' in ns '%s'" % ns
                    )
        return namespaces

    def _matches_regex(self, test_string, namespace_key, return_obj=False):
        if not isinstance(test_string, basestring):
            logger.error(
                "Expected a string to test regex against, but got %s",
                test_string
            )
            return False

        for regex_obj in self.namespaces[namespace_key]:
            if re.match(regex_obj["regex"], test_string):
                if return_obj:
                    return regex_obj
                return True
        return False

    def _is_exclusive(self, ns_key, test_string):
        regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
        if regex_obj:
            return regex_obj["exclusive"]
        return False

    def _matches_user(self, event, member_list):
        if (hasattr(event, "sender") and
                self.is_interested_in_user(event.sender)):
            return True
        # also check m.room.member state key
        if (hasattr(event, "type") and event.type == EventTypes.Member
                and hasattr(event, "state_key")
                and self.is_interested_in_user(event.state_key)):
            return True
        # check joined member events
        for member in member_list:
            if self.is_interested_in_user(member.state_key):
                return True
        return False

    def _matches_room_id(self, event):
        if hasattr(event, "room_id"):
            return self.is_interested_in_room(event.room_id)
        return False

    def _matches_aliases(self, event, alias_list):
        for alias in alias_list:
            if self.is_interested_in_alias(alias):
                return True
        return False

    def is_interested(self, event, restrict_to=None, aliases_for_event=None,
                      member_list=None):
        """Check if this service is interested in this event.

        Args:
            event(Event): The event to check.
            restrict_to(str): The namespace to restrict regex tests to.
            aliases_for_event(list): A list of all the known room aliases for
                this event.
            member_list(list): A list of all joined room members in this room.
        Returns:
            bool: True if this service would like to know about this event.
        """
        if aliases_for_event is None:
            aliases_for_event = []
        if member_list is None:
            member_list = []

        if restrict_to and restrict_to not in ApplicationService.NS_LIST:
            # this is a programming error, so fail early and raise a general
            # exception
            raise Exception("Unexpected restrict_to value: %s" % restrict_to)

        if not restrict_to:
            return (self._matches_user(event, member_list)
                    or self._matches_aliases(event, aliases_for_event)
                    or self._matches_room_id(event))
        elif restrict_to == ApplicationService.NS_ALIASES:
            return self._matches_aliases(event, aliases_for_event)
        elif restrict_to == ApplicationService.NS_ROOMS:
            return self._matches_room_id(event)
        elif restrict_to == ApplicationService.NS_USERS:
            return self._matches_user(event, member_list)

    def is_interested_in_user(self, user_id):
        return self._matches_regex(user_id, ApplicationService.NS_USERS)

    def is_interested_in_alias(self, alias):
        return self._matches_regex(alias, ApplicationService.NS_ALIASES)

    def is_interested_in_room(self, room_id):
        return self._matches_regex(room_id, ApplicationService.NS_ROOMS)

    def is_exclusive_user(self, user_id):
        return self._is_exclusive(ApplicationService.NS_USERS, user_id)

    def is_exclusive_alias(self, alias):
        return self._is_exclusive(ApplicationService.NS_ALIASES, alias)

    def is_exclusive_room(self, room_id):
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def __str__(self):
        return "ApplicationService: %s" % (self.__dict__,)
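To make the regex-namespace plumbing above concrete, a hedged usage sketch with hypothetical IRC-bridge data (missing namespaces are backfilled with empty lists by _check_namespaces):

bridge = ApplicationService(
    token="as_token",  # hypothetical AS token
    namespaces={
        "users": [{"regex": "@irc_.*", "exclusive": True}],
    },
)
bridge.is_interested_in_user("@irc_alice:example.com")  # True (re.match hit)
bridge.is_exclusive_user("@irc_alice:example.com")      # True
bridge.is_interested_in_user("@bob:example.com")        # False
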
synapse/appservice/api.py (new file, 108 lines)
@@ -0,0 +1,108 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer

from synapse.api.errors import CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.events.utils import serialize_event

import logging
import urllib

logger = logging.getLogger(__name__)


class ApplicationServiceApi(SimpleHttpClient):
    """This class manages HS -> AS communications, including querying and
    pushing.
    """

    def __init__(self, hs):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

    @defer.inlineCallbacks
    def query_user(self, service, user_id):
        uri = service.url + ("/users/%s" % urllib.quote(user_id))
        response = None
        try:
            response = yield self.get_json(uri, {
                "access_token": service.hs_token
            })
            if response is not None:  # just an empty json object
                defer.returnValue(True)
        except CodeMessageException as e:
            if e.code == 404:
                defer.returnValue(False)
                return
            logger.warning("query_user to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("query_user to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def query_alias(self, service, alias):
        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
        response = None
        try:
            response = yield self.get_json(uri, {
                "access_token": service.hs_token
            })
            if response is not None:  # just an empty json object
                defer.returnValue(True)
        except CodeMessageException as e:
            logger.warning("query_alias to %s received %s", uri, e.code)
            if e.code == 404:
                defer.returnValue(False)
                return
        except Exception as ex:
            logger.warning("query_alias to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def push_bulk(self, service, events):
        events = self._serialize(events)

        uri = service.url + ("/transactions/%s" %
                             urllib.quote(str(0)))  # TODO txn_ids
        response = None
        try:
            response = yield self.put_json(
                uri=uri,
                json_body={
                    "events": events
                },
                args={
                    "access_token": service.hs_token
                })
            if response:  # just an empty json object
                # TODO: Mark txn as sent successfully
                defer.returnValue(True)
        except CodeMessageException as e:
            logger.warning("push_bulk to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("push_bulk to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def push(self, service, event):
        response = yield self.push_bulk(service, [event])
        defer.returnValue(response)

    def _serialize(self, events):
        time_now = self.clock.time_msec()
        return [
            serialize_event(e, time_now, as_client_event=True) for e in events
        ]
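A hedged caller-side sketch (names hypothetical) showing how the deferred results above are consumed:

from twisted.internet import defer

@defer.inlineCallbacks
def user_exists_on_bridge(as_api, service, user_id):
    # query_user resolves to True when the AS answers 2xx with a JSON body,
    # and False on a 404 or any other failure.
    exists = yield as_api.query_user(service, user_id)
    defer.returnValue(exists)
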
@@ -27,6 +27,16 @@ class Config(object):
    def __init__(self, args):
        pass

    @staticmethod
    def parse_size(string):
        sizes = {"K": 1024, "M": 1024 * 1024}
        size = 1
        suffix = string[-1]
        if suffix in sizes:
            string = string[:-1]
            size = sizes[suffix]
        return int(string) * size

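The suffix handling is easiest to see by example:

Config.parse_size("100K")  # 100 * 1024       == 102400
Config.parse_size("10M")   # 10 * 1024 * 1024 == 10485760
Config.parse_size("512")   # no suffix, so just int("512") == 512
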
    @staticmethod
    def abspath(file_path):
        return os.path.abspath(file_path) if file_path else file_path
@@ -50,8 +60,9 @@ class Config(object):
        )
        return cls.abspath(file_path)

    @staticmethod
    def ensure_directory(dir_path):
    @classmethod
    def ensure_directory(cls, dir_path):
        dir_path = cls.abspath(dir_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        if not os.path.isdir(dir_path):
@@ -20,7 +20,11 @@ import os
class DatabaseConfig(Config):
    def __init__(self, args):
        super(DatabaseConfig, self).__init__(args)
        self.database_path = self.abspath(args.database_path)
        if args.database_path == ":memory:":
            self.database_path = ":memory:"
        else:
            self.database_path = self.abspath(args.database_path)
        self.event_cache_size = self.parse_size(args.event_cache_size)

    @classmethod
    def add_arguments(cls, parser):
@@ -30,6 +34,10 @@ class DatabaseConfig(Config):
            "-d", "--database-path", default="homeserver.db",
            help="The database name."
        )
        db_group.add_argument(
            "--event-cache-size", default="100K",
            help="Number of events to cache in memory."
        )

    @classmethod
    def generate_config(cls, args, config_dir_path):
@@ -22,11 +22,14 @@ from .repository import ContentRepositoryConfig
from .captcha import CaptchaConfig
from .email import EmailConfig
from .voip import VoipConfig
from .registration import RegistrationConfig
from .metrics import MetricsConfig


class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                       RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                       EmailConfig, VoipConfig):
                       EmailConfig, VoipConfig, RegistrationConfig,
                       MetricsConfig,):
    pass

@@ -18,6 +18,7 @@ from synapse.util.logcontext import LoggingContextFilter
from twisted.python.log import PythonLoggingObserver
import logging
import logging.config
import yaml


class LoggingConfig(Config):
@@ -66,7 +67,10 @@ class LoggingConfig(Config):

        formatter = logging.Formatter(log_format)
        if self.log_file:
            handler = logging.FileHandler(self.log_file)
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
            )
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
@@ -76,7 +80,8 @@ class LoggingConfig(Config):
            logger.addHandler(handler)
            logger.info("Test")
        else:
            logging.config.fileConfig(self.log_config)
            with open(self.log_config, 'r') as f:
                logging.config.dictConfig(yaml.load(f))

        observer = PythonLoggingObserver()
        observer.start()

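For scale: with maxBytes of 100 MB and backupCount=3, the rotated set tops out around 400 MB on disk. A standalone sketch with a hypothetical path:

import logging.handlers

# Produces homeserver.log plus .1/.2/.3 backups, each capped at ~100 MB.
handler = logging.handlers.RotatingFileHandler(
    "homeserver.log", maxBytes=1000 * 1000 * 100, backupCount=3
)
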
synapse/config/metrics.py (new file, 36 lines)
@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class MetricsConfig(Config):
    def __init__(self, args):
        super(MetricsConfig, self).__init__(args)
        self.enable_metrics = args.enable_metrics
        self.metrics_port = args.metrics_port

    @classmethod
    def add_arguments(cls, parser):
        super(MetricsConfig, cls).add_arguments(parser)
        metrics_group = parser.add_argument_group("metrics")
        metrics_group.add_argument(
            '--enable-metrics', dest="enable_metrics", action="store_true",
            help="Enable collection and rendering of performance metrics"
        )
        metrics_group.add_argument(
            '--metrics-port', metavar="PORT", type=int,
            help="Separate port to accept metrics requests on (on localhost)"
        )
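A standalone sketch of how those two flags parse, using plain argparse outside the Config class hierarchy:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--enable-metrics', dest="enable_metrics",
                    action="store_true")
parser.add_argument('--metrics-port', metavar="PORT", type=int)
args = parser.parse_args(["--enable-metrics", "--metrics-port", "9090"])
# args.enable_metrics == True, args.metrics_port == 9090
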
@@ -22,6 +22,12 @@ class RatelimitConfig(Config):
        self.rc_messages_per_second = args.rc_messages_per_second
        self.rc_message_burst_count = args.rc_message_burst_count

        self.federation_rc_window_size = args.federation_rc_window_size
        self.federation_rc_sleep_limit = args.federation_rc_sleep_limit
        self.federation_rc_sleep_delay = args.federation_rc_sleep_delay
        self.federation_rc_reject_limit = args.federation_rc_reject_limit
        self.federation_rc_concurrent = args.federation_rc_concurrent

    @classmethod
    def add_arguments(cls, parser):
        super(RatelimitConfig, cls).add_arguments(parser)
@@ -34,3 +40,33 @@ class RatelimitConfig(Config):
            "--rc-message-burst-count", type=float, default=10,
            help="number of messages a client can send before being throttled"
        )

        rc_group.add_argument(
            "--federation-rc-window-size", type=int, default=10000,
            help="The federation window size in milliseconds",
        )

        rc_group.add_argument(
            "--federation-rc-sleep-limit", type=int, default=10,
            help="The number of federation requests from a single server"
                 " in a window before the server will delay processing the"
                 " request.",
        )

        rc_group.add_argument(
            "--federation-rc-sleep-delay", type=int, default=500,
            help="The duration in milliseconds to delay processing events from"
                 " remote servers by if they go over the sleep limit.",
        )

        rc_group.add_argument(
            "--federation-rc-reject-limit", type=int, default=50,
            help="The maximum number of concurrent federation requests allowed"
                 " from a single server",
        )

        rc_group.add_argument(
            "--federation-rc-concurrent", type=int, default=3,
            help="The number of federation requests to concurrently process"
                 " from a single server",
        )
synapse/config/registration.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

from synapse.util.stringutils import random_string_with_symbols

import distutils.util


class RegistrationConfig(Config):

    def __init__(self, args):
        super(RegistrationConfig, self).__init__(args)

        # `args.disable_registration` may either be a bool or a string
        # depending on if the option was given a value (e.g.
        # --disable-registration=false would set `args.disable_registration`
        # to "false" not False.)
        self.disable_registration = bool(
            distutils.util.strtobool(str(args.disable_registration))
        )
        self.registration_shared_secret = args.registration_shared_secret

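A hedged illustration of that bool-or-string normalisation:

import distutils.util

bool(distutils.util.strtobool(str(True)))   # True: bare flag (const=True)
bool(distutils.util.strtobool("false"))     # False: --disable-registration=false
bool(distutils.util.strtobool("1"))         # True: strtobool returns 0 or 1
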
    @classmethod
    def add_arguments(cls, parser):
        super(RegistrationConfig, cls).add_arguments(parser)
        reg_group = parser.add_argument_group("registration")

        reg_group.add_argument(
            "--disable-registration",
            const=True,
            default=True,
            nargs='?',
            help="Disable registration of new users.",
        )
        reg_group.add_argument(
            "--registration-shared-secret", type=str,
            help="If set, allows registration by anyone who also has the shared"
                 " secret, even if registration is otherwise disabled.",
        )

    @classmethod
    def generate_config(cls, args, config_dir_path):
        if args.disable_registration is None:
            args.disable_registration = True

        if args.registration_shared_secret is None:
            args.registration_shared_secret = random_string_with_symbols(50)
@@ -28,9 +28,9 @@ class ServerConfig(Config):
        self.unsecure_port = args.unsecure_port
        self.daemonize = args.daemonize
        self.pid_file = self.abspath(args.pid_file)
        self.webclient = True
        self.web_client = args.web_client
        self.manhole = args.manhole
        self.no_tls = args.no_tls
        self.soft_file_limit = args.soft_file_limit

        if not args.content_addr:
            host = args.server_name
@@ -47,8 +47,12 @@ class ServerConfig(Config):
    def add_arguments(cls, parser):
        super(ServerConfig, cls).add_arguments(parser)
        server_group = parser.add_argument_group("server")
        server_group.add_argument("-H", "--server-name", default="localhost",
                                  help="The name of the server")
        server_group.add_argument(
            "-H", "--server-name", default="localhost",
            help="The domain name of the server, with optional explicit port. "
                 "This is used by remote servers to connect to this server, "
                 "e.g. matrix.org, localhost:8080, etc."
        )
        server_group.add_argument("--signing-key-path",
                                  help="The signing key to sign messages with")
        server_group.add_argument("-p", "--bind-port", metavar="PORT",
@@ -64,6 +68,8 @@ class ServerConfig(Config):
        server_group.add_argument('--pid-file', default="homeserver.pid",
                                  help="When running as a daemon, the file to"
                                       " store the pid in")
        server_group.add_argument('--web_client', default=True, type=bool,
                                  help="Whether or not to serve a web client")
        server_group.add_argument("--manhole", metavar="PORT", dest="manhole",
                                  type=int,
                                  help="Turn on the twisted telnet manhole"
@@ -71,8 +77,12 @@ class ServerConfig(Config):
        server_group.add_argument("--content-addr", default=None,
                                  help="The host and scheme to use for the "
                                       "content repository")
        server_group.add_argument("--no-tls", action='store_true',
                                  help="Don't bind to the https port.")
        server_group.add_argument("--soft-file-limit", type=int, default=0,
                                  help="Set the soft limit on the number of "
                                       "file descriptors synapse can use. "
                                       "Zero is used to indicate synapse "
                                       "should set the soft limit to the "
                                       "hard limit.")

    def read_signing_key(self, signing_key_path):
        signing_keys = self.read_file(signing_key_path, "signing_key")
@@ -28,9 +28,16 @@ class TlsConfig(Config):
        self.tls_certificate = self.read_tls_certificate(
            args.tls_certificate_path
        )
        self.tls_private_key = self.read_tls_private_key(
            args.tls_private_key_path
        )

        self.no_tls = args.no_tls

        if self.no_tls:
            self.tls_private_key = None
        else:
            self.tls_private_key = self.read_tls_private_key(
                args.tls_private_key_path
            )

        self.tls_dh_params_path = self.check_file(
            args.tls_dh_params_path, "tls_dh_params"
        )
@@ -45,6 +52,8 @@ class TlsConfig(Config):
                               help="PEM encoded private key for TLS")
        tls_group.add_argument("--tls-dh-params-path",
                               help="PEM dh parameters for ephemeral keys")
        tls_group.add_argument("--no-tls", action='store_true',
                               help="Don't bind to the https port.")

    def read_tls_certificate(self, cert_path):
        cert_pem = self.read_file(cert_path, "tls_certificate")
@@ -28,7 +28,7 @@ class VoipConfig(Config):
        super(VoipConfig, cls).add_arguments(parser)
        group = parser.add_argument_group("voip")
        group.add_argument(
            "--turn-uris", type=str, default=None,
            "--turn-uris", type=str, default=None, action='append',
            help="The public URIs of the TURN server to give to clients"
        )
        group.add_argument(
@@ -38,7 +38,10 @@ class ServerContextFactory(ssl.ContextFactory):
            logger.exception("Failed to enable elliptic curve for TLS")
        context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
        context.use_certificate(config.tls_certificate)
        context.use_privatekey(config.tls_private_key)

        if not config.no_tls:
            context.use_privatekey(config.tls_private_key)

        context.load_tmp_dh(config.tls_dh_params_path)
        context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")

@@ -19,7 +19,7 @@ from twisted.internet.protocol import Factory
from twisted.internet import defer, reactor
from synapse.http.endpoint import matrix_federation_endpoint
from synapse.util.logcontext import PreserveLoggingContext
import json
import simplejson as json
import logging


@@ -61,9 +61,11 @@ class SynapseKeyClientProtocol(HTTPClient):

    def __init__(self):
        self.remote_key = defer.Deferred()
        self.host = None

    def connectionMade(self):
        logger.debug("Connected to %s", self.transport.getHost())
        self.host = self.transport.getHost()
        logger.debug("Connected to %s", self.host)
        self.sendCommand(b"GET", b"/_matrix/key/v1/")
        self.endHeaders()
        self.timer = reactor.callLater(
@@ -73,7 +75,7 @@ class SynapseKeyClientProtocol(HTTPClient):

    def handleStatus(self, version, status, message):
        if status != b"200":
            #logger.info("Non-200 response from %s: %s %s",
            # logger.info("Non-200 response from %s: %s %s",
            #     self.transport.getHost(), status, message)
            self.transport.abortConnection()

@@ -81,7 +83,7 @@ class SynapseKeyClientProtocol(HTTPClient):
        try:
            json_response = json.loads(response_body_bytes)
        except ValueError:
            #logger.info("Invalid JSON response from %s",
            # logger.info("Invalid JSON response from %s",
            #     self.transport.getHost())
            self.transport.abortConnection()
            return
@@ -92,8 +94,7 @@ class SynapseKeyClientProtocol(HTTPClient):
        self.timer.cancel()

    def on_timeout(self):
        logger.debug("Timeout waiting for response from %s",
                     self.transport.getHost())
        logger.debug("Timeout waiting for response from %s", self.host)
        self.remote_key.errback(IOError("Timeout waiting for response"))
        self.transport.abortConnection()

@@ -22,6 +22,8 @@ from syutil.crypto.signing_key import (
from syutil.base64util import decode_base64, encode_base64
from synapse.api.errors import SynapseError, Codes

from synapse.util.retryutils import get_retry_limiter

from OpenSSL import crypto

import logging
@@ -48,18 +50,27 @@ class Keyring(object):
        )
        try:
            verify_key = yield self.get_server_verify_key(server_name, key_ids)
        except IOError:
        except IOError as e:
            logger.warn(
                "Got IOError when downloading keys for %s: %s %s",
                server_name, type(e).__name__, str(e.message),
            )
            raise SynapseError(
                502,
                "Error downloading keys for %s" % (server_name,),
                Codes.UNAUTHORIZED,
            )
        except:
        except Exception as e:
            logger.warn(
                "Got Exception when downloading keys for %s: %s %s",
                server_name, type(e).__name__, str(e.message),
            )
            raise SynapseError(
                401,
                "No key for %s with id %s" % (server_name, key_ids),
                Codes.UNAUTHORIZED,
            )

        try:
            verify_signed_json(json_object, server_name, verify_key)
        except:
@@ -87,12 +98,18 @@ class Keyring(object):
            return

        # Try to fetch the key from the remote server.
        # TODO(markjh): Ratelimit requests to a given server.

        (response, tls_certificate) = yield fetch_server_key(
            server_name, self.hs.tls_context_factory
        limiter = yield get_retry_limiter(
            server_name,
            self.clock,
            self.store,
        )

        with limiter:
            (response, tls_certificate) = yield fetch_server_key(
                server_name, self.hs.tls_context_factory
            )

        # Check the response.

        x509_certificate_bytes = crypto.dump_certificate(
@@ -13,12 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.util.frozenutils import freeze, unfreeze
from synapse.util.frozenutils import freeze


class _EventInternalMetadata(object):
    def __init__(self, internal_metadata_dict):
        self.__dict__ = internal_metadata_dict
        self.__dict__ = dict(internal_metadata_dict)

    def get_dict(self):
        return dict(self.__dict__)
@@ -77,7 +77,7 @@ class EventBase(object):
        return self.content["membership"]

    def is_state(self):
        return hasattr(self, "state_key")
        return hasattr(self, "state_key") and self.state_key is not None

    def get_dict(self):
        d = dict(self._event_dict)
@@ -140,10 +140,6 @@ class FrozenEvent(EventBase):

        return e

    def get_dict(self):
        # We need to unfreeze what we return
        return unfreeze(super(FrozenEvent, self).get_dict())

    def __str__(self):
        return self.__repr__()

@@ -23,22 +23,17 @@ import copy


class EventBuilder(EventBase):
    def __init__(self, key_values={}):
    def __init__(self, key_values={}, internal_metadata_dict={}):
        signatures = copy.deepcopy(key_values.pop("signatures", {}))
        unsigned = copy.deepcopy(key_values.pop("unsigned", {}))

        super(EventBuilder, self).__init__(
            key_values,
            signatures=signatures,
            unsigned=unsigned
            unsigned=unsigned,
            internal_metadata_dict=internal_metadata_dict,
        )

    def update_event_key(self, key, value):
        self._event_dict[key] = value

    def update_event_keys(self, other_dict):
        self._event_dict.update(other_dict)

    def build(self):
        return FrozenEvent.from_event(self)

@@ -16,7 +16,7 @@

class EventContext(object):

    def __init__(self, current_state=None, auth_events=None):
    def __init__(self, current_state=None):
        self.current_state = current_state
        self.auth_events = auth_events
        self.state_group = None
        self.rejected = False

@@ -45,12 +45,14 @@ def prune_event(event):
        "membership",
    ]

    event_dict = event.get_dict()

    new_content = {}

    def add_fields(*fields):
        for field in fields:
            if field in event.content:
                new_content[field] = event.content[field]
                new_content[field] = event_dict["content"][field]

    if event_type == EventTypes.Member:
        add_fields("membership")
@@ -75,7 +77,7 @@ def prune_event(event):

    allowed_fields = {
        k: v
        for k, v in event.get_dict().items()
        for k, v in event_dict.items()
        if k in allowed_keys
    }

@@ -86,48 +88,78 @@ def prune_event(event):
    if "age_ts" in event.unsigned:
        allowed_fields["unsigned"]["age_ts"] = event.unsigned["age_ts"]

    return type(event)(allowed_fields)
    return type(event)(
        allowed_fields,
        internal_metadata_dict=event.internal_metadata.get_dict()
    )


def serialize_event(hs, e):
def format_event_raw(d):
    return d


def format_event_for_client_v1(d):
    d["user_id"] = d.pop("sender", None)

    move_keys = ("age", "redacted_because", "replaces_state", "prev_content")
    for key in move_keys:
        if key in d["unsigned"]:
            d[key] = d["unsigned"][key]

    drop_keys = (
        "auth_events", "prev_events", "hashes", "signatures", "depth",
        "unsigned", "origin", "prev_state"
    )
    for key in drop_keys:
        d.pop(key, None)
    return d


def format_event_for_client_v2(d):
    drop_keys = (
        "auth_events", "prev_events", "hashes", "signatures", "depth",
        "origin", "prev_state",
    )
    for key in drop_keys:
        d.pop(key, None)
    return d


def format_event_for_client_v2_without_event_id(d):
    d = format_event_for_client_v2(d)
    d.pop("room_id", None)
    d.pop("event_id", None)
    return d


def serialize_event(e, time_now_ms, as_client_event=True,
                    event_format=format_event_for_client_v1,
                    token_id=None):
    # FIXME(erikj): To handle the case of presence events and the like
    if not isinstance(e, EventBase):
        return e

    time_now_ms = int(time_now_ms)

    # Should this strip out None's?
    d = {k: v for k, v in e.get_dict().items()}

    if "age_ts" in d["unsigned"]:
        now = int(hs.get_clock().time_msec())
        d["unsigned"]["age"] = now - d["unsigned"]["age_ts"]
        d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
        del d["unsigned"]["age_ts"]

    d["user_id"] = d.pop("sender", None)

    if "redacted_because" in e.unsigned:
        d["redacted_because"] = serialize_event(
            hs, e.unsigned["redacted_because"]
        d["unsigned"]["redacted_because"] = serialize_event(
            e.unsigned["redacted_because"], time_now_ms
        )

        del d["unsigned"]["redacted_because"]
    if token_id is not None:
        if token_id == getattr(e.internal_metadata, "token_id", None):
            txn_id = getattr(e.internal_metadata, "txn_id", None)
            if txn_id is not None:
                d["unsigned"]["transaction_id"] = txn_id

    if "redacted_by" in e.unsigned:
        d["redacted_by"] = e.unsigned["redacted_by"]
        del d["unsigned"]["redacted_by"]

    if "replaces_state" in e.unsigned:
        d["replaces_state"] = e.unsigned["replaces_state"]
        del d["unsigned"]["replaces_state"]

    if "prev_content" in e.unsigned:
        d["prev_content"] = e.unsigned["prev_content"]
        del d["unsigned"]["prev_content"]

    del d["auth_events"]
    del d["prev_events"]
    del d["hashes"]
    del d["signatures"]
    d.pop("depth", None)
    d.pop("unsigned", None)
    d.pop("origin", None)

    return d
    if as_client_event:
        return event_format(d)
    else:
        return d

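A hedged sketch (minimal hypothetical event dict) of what the new v1 client format does:

d = {
    "sender": "@alice:example.com",
    "depth": 5,
    "signatures": {},
    "unsigned": {"age": 1234, "prev_content": {"name": "old"}},
}
out = format_event_for_client_v1(d)
# out["user_id"] == "@alice:example.com" (renamed from "sender"),
# out["age"] and out["prev_content"] are hoisted out of "unsigned",
# and "signatures"/"depth"/"unsigned" etc. are dropped entirely.
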
synapse/federation/federation_base.py (new file, 133 lines)
@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.internet import defer

from synapse.events.utils import prune_event

from syutil.jsonutil import encode_canonical_json

from synapse.crypto.event_signing import check_event_content_hash

from synapse.api.errors import SynapseError

import logging


logger = logging.getLogger(__name__)


class FederationBase(object):
    @defer.inlineCallbacks
    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False):
        """Takes a list of PDUs and checks the signatures and hashes of each
        one. If a PDU fails its signature check then we check if we have it
        in the database, and if not then request it from the originating
        server of that PDU.

        If a PDU fails its content hash check then it is redacted.

        The given list of PDUs are not modified; instead the function returns
        a new list.

        Args:
            pdus (list)
            outlier (bool)

        Returns:
            Deferred : A list of PDUs that have valid signatures and hashes.
        """

        signed_pdus = []

        @defer.inlineCallbacks
        def do(pdu):
            try:
                new_pdu = yield self._check_sigs_and_hash(pdu)
                signed_pdus.append(new_pdu)
            except SynapseError:
                # FIXME: We should handle signature failures more gracefully.

                # Check local db.
                new_pdu = yield self.store.get_event(
                    pdu.event_id,
                    allow_rejected=True,
                    allow_none=True,
                )
                if new_pdu:
                    signed_pdus.append(new_pdu)
                    return

                # Check pdu.origin
                if pdu.origin != origin:
                    try:
                        new_pdu = yield self.get_pdu(
                            destinations=[pdu.origin],
                            event_id=pdu.event_id,
                            outlier=outlier,
                        )

                        if new_pdu:
                            signed_pdus.append(new_pdu)
                            return
                    except:
                        pass

                logger.warn(
                    "Failed to find copy of %s with valid signature",
                    pdu.event_id,
                )

        yield defer.gatherResults(
            [do(pdu) for pdu in pdus],
            consumeErrors=True
        )

        defer.returnValue(signed_pdus)

    @defer.inlineCallbacks
    def _check_sigs_and_hash(self, pdu):
        """Throws a SynapseError if the PDU does not have the correct
        signatures.

        Returns:
            FrozenEvent: Either the given event, or a redacted copy of it if
            it failed the content hash check.
        """
        # Check signatures are correct.
        redacted_event = prune_event(pdu)
        redacted_pdu_json = redacted_event.get_pdu_json()

        try:
            yield self.keyring.verify_json_for_server(
                pdu.origin, redacted_pdu_json
            )
        except SynapseError:
            logger.warn(
                "Signature check failed for %s redacted to %s",
                encode_canonical_json(pdu.get_pdu_json()),
                encode_canonical_json(redacted_pdu_json),
            )
            raise

        if not check_event_content_hash(pdu):
            logger.warn(
                "Event content has been tampered with, redacting %s, %s",
                pdu.event_id, encode_canonical_json(pdu.get_dict())
            )
            defer.returnValue(redacted_event)

        defer.returnValue(pdu)
synapse/federation/federation_client.py (new file, 578 lines)
@@ -0,0 +1,578 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.internet import defer

from .federation_base import FederationBase
from .units import Edu

from synapse.api.errors import (
    CodeMessageException, HttpResponseException, SynapseError,
)
from synapse.util.expiringcache import ExpiringCache
from synapse.util.logutils import log_function
from synapse.events import FrozenEvent
import synapse.metrics

from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

import itertools
import logging
import random


logger = logging.getLogger(__name__)


# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")

sent_edus_counter = metrics.register_counter("sent_edus")

sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


class FederationClient(FederationBase):

    def start_get_pdu_cache(self):
        self._get_pdu_cache = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=120*1000,
            reset_expiry_on_get=False,
        )

        self._get_pdu_cache.start()
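A hedged sketch of the cache contract relied on here, using only the operations this file itself exercises (clock and pdu are hypothetical stand-ins):

cache = ExpiringCache(
    cache_name="example",
    clock=clock,
    max_len=1000,
    expiry_ms=120 * 1000,        # entries live for two minutes...
    reset_expiry_on_get=False,   # ...and a hit does not extend that
)
cache.start()
cache["$event:example.com"] = pdu       # filled in get_pdu() below
hit = cache.get("$event:example.com")   # the pdu until expiry, then a miss
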
@log_function
|
||||
def send_pdu(self, pdu, destinations):
|
||||
"""Informs the replication layer about a new PDU generated within the
|
||||
home server that should be transmitted to others.
|
||||
|
||||
TODO: Figure out when we should actually resolve the deferred.
|
||||
|
||||
Args:
|
||||
pdu (Pdu): The new Pdu.
|
||||
|
||||
Returns:
|
||||
Deferred: Completes when we have successfully processed the PDU
|
||||
and replicated it to any interested remote home servers.
|
||||
"""
|
||||
order = self._order
|
||||
self._order += 1
|
||||
|
||||
sent_pdus_destination_dist.inc_by(len(destinations))
|
||||
|
||||
logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)
|
||||
|
||||
# TODO, add errback, etc.
|
||||
self._transaction_queue.enqueue_pdu(pdu, destinations, order)
|
||||
|
||||
logger.debug(
|
||||
"[%s] transaction_layer.enqueue_pdu... done",
|
||||
pdu.event_id
|
||||
)
|
||||
|
||||
@log_function
|
||||
def send_edu(self, destination, edu_type, content):
|
||||
edu = Edu(
|
||||
origin=self.server_name,
|
||||
destination=destination,
|
||||
edu_type=edu_type,
|
||||
content=content,
|
||||
)
|
||||
|
||||
sent_edus_counter.inc()
|
||||
|
||||
# TODO, add errback, etc.
|
||||
self._transaction_queue.enqueue_edu(edu)
|
||||
return defer.succeed(None)
|
||||
|
||||
@log_function
|
||||
def send_failure(self, failure, destination):
|
||||
self._transaction_queue.enqueue_failure(failure, destination)
|
||||
return defer.succeed(None)
|
||||
|
||||
@log_function
|
||||
def make_query(self, destination, query_type, args,
|
||||
retry_on_dns_fail=True):
|
||||
"""Sends a federation Query to a remote homeserver of the given type
|
||||
and arguments.
|
||||
|
||||
Args:
|
||||
destination (str): Domain name of the remote homeserver
|
||||
query_type (str): Category of the query type; should match the
|
||||
handler name used in register_query_handler().
|
||||
args (dict): Mapping of strings to strings containing the details
|
||||
of the query request.
|
||||
|
||||
Returns:
|
||||
a Deferred which will eventually yield a JSON object from the
|
||||
response
|
||||
"""
|
||||
sent_queries_counter.inc(query_type)
|
||||
|
||||
return self.transport_layer.make_query(
|
||||
destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def backfill(self, dest, context, limit, extremities):
|
||||
"""Requests some more historic PDUs for the given context from the
|
||||
given destination server.
|
||||
|
||||
Args:
|
||||
dest (str): The remote home server to ask.
|
||||
context (str): The context to backfill.
|
||||
limit (int): The maximum number of PDUs to return.
|
||||
extremities (list): List of PDU id and origins of the first pdus
|
||||
we have seen from the context
|
||||
|
||||
Returns:
|
||||
Deferred: Results in the received PDUs.
|
||||
"""
|
||||
logger.debug("backfill extrem=%s", extremities)
|
||||
|
||||
# If there are no extremeties then we've (probably) reached the start.
|
||||
if not extremities:
|
||||
return
|
||||
|
||||
transaction_data = yield self.transport_layer.backfill(
|
||||
dest, context, extremities, limit)
|
||||
|
||||
logger.debug("backfill transaction_data=%s", repr(transaction_data))
|
||||
|
||||
pdus = [
|
||||
self.event_from_pdu_json(p, outlier=False)
|
||||
for p in transaction_data["pdus"]
|
||||
]
|
||||
|
||||
for i, pdu in enumerate(pdus):
|
||||
pdus[i] = yield self._check_sigs_and_hash(pdu)
|
||||
|
||||
# FIXME: We should handle signature failures more gracefully.
|
||||
|
||||
defer.returnValue(pdus)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def get_pdu(self, destinations, event_id, outlier=False):
|
||||
"""Requests the PDU with given origin and ID from the remote home
|
||||
servers.
|
||||
|
||||
Will attempt to get the PDU from each destination in the list until
|
||||
one succeeds.
|
||||
|
||||
This will persist the PDU locally upon receipt.
|
||||
|
||||
Args:
|
||||
destinations (list): Which home servers to query
|
||||
pdu_origin (str): The home server that originally sent the pdu.
|
||||
event_id (str)
|
||||
outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
|
||||
it's from an arbitary point in the context as opposed to part
|
||||
of the current block of PDUs. Defaults to `False`
|
||||
|
||||
Returns:
|
||||
Deferred: Results in the requested PDU.
|
||||
"""
|
||||
|
||||
# TODO: Rate limit the number of times we try and get the same event.
|
||||
|
||||
if self._get_pdu_cache:
|
||||
e = self._get_pdu_cache.get(event_id)
|
||||
if e:
|
||||
defer.returnValue(e)
|
||||
|
||||
pdu = None
|
||||
for destination in destinations:
|
||||
try:
|
||||
limiter = yield get_retry_limiter(
|
||||
destination,
|
||||
self._clock,
|
||||
self.store,
|
||||
)
|
||||
|
||||
with limiter:
|
||||
transaction_data = yield self.transport_layer.get_event(
|
||||
destination, event_id
|
||||
)
|
||||
|
||||
logger.debug("transaction_data %r", transaction_data)
|
||||
|
||||
pdu_list = [
|
||||
self.event_from_pdu_json(p, outlier=outlier)
|
||||
for p in transaction_data["pdus"]
|
||||
]
|
||||
|
||||
if pdu_list:
|
||||
pdu = pdu_list[0]
|
||||
|
||||
# Check signatures are correct.
|
||||
pdu = yield self._check_sigs_and_hash(pdu)
|
||||
|
||||
break
|
||||
|
||||
except SynapseError:
|
||||
logger.info(
|
||||
"Failed to get PDU %s from %s because %s",
|
||||
event_id, destination, e,
|
||||
)
|
||||
continue
|
||||
except CodeMessageException as e:
|
||||
if 400 <= e.code < 500:
|
||||
raise
|
||||
|
||||
logger.info(
|
||||
"Failed to get PDU %s from %s because %s",
|
||||
event_id, destination, e,
|
||||
)
|
||||
continue
|
||||
except NotRetryingDestination as e:
|
||||
logger.info(e.message)
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Failed to get PDU %s from %s because %s",
|
||||
event_id, destination, e,
|
||||
)
|
||||
continue
|
||||
|
||||
if self._get_pdu_cache is not None:
|
||||
self._get_pdu_cache[event_id] = pdu
|
||||
|
||||
defer.returnValue(pdu)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def get_state_for_room(self, destination, room_id, event_id):
|
||||
"""Requests all of the `current` state PDUs for a given room from
|
||||
a remote home server.
|
||||
|
||||
Args:
|
||||
destination (str): The remote homeserver to query for the state.
|
||||
room_id (str): The id of the room we're interested in.
|
||||
event_id (str): The id of the event we want the state at.
|
||||
|
||||
Returns:
|
||||
Deferred: Results in a list of PDUs.
|
||||
"""
|
||||
|
||||
result = yield self.transport_layer.get_room_state(
|
||||
destination, room_id, event_id=event_id,
|
||||
)
|
||||
|
||||
pdus = [
|
||||
self.event_from_pdu_json(p, outlier=True) for p in result["pdus"]
|
||||
]
|
||||
|
||||
auth_chain = [
|
||||
self.event_from_pdu_json(p, outlier=True)
|
||||
for p in result.get("auth_chain", [])
|
||||
]
|
||||
|
||||
signed_pdus = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, pdus, outlier=True
|
||||
)
|
||||
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, auth_chain, outlier=True
|
||||
)
|
||||
|
||||
signed_auth.sort(key=lambda e: e.depth)
|
||||
|
||||
defer.returnValue((signed_pdus, signed_auth))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@log_function
|
||||
def get_event_auth(self, destination, room_id, event_id):
|
||||
res = yield self.transport_layer.get_event_auth(
|
||||
destination, room_id, event_id,
|
||||
)
|
||||
|
||||
auth_chain = [
|
||||
self.event_from_pdu_json(p, outlier=True)
|
||||
for p in res["auth_chain"]
|
||||
]
|
||||
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, auth_chain, outlier=True
|
||||
)
|
||||
|
||||
signed_auth.sort(key=lambda e: e.depth)
|
||||
|
||||
defer.returnValue(signed_auth)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def make_join(self, destinations, room_id, user_id):
|
||||
for destination in destinations:
|
||||
try:
|
||||
ret = yield self.transport_layer.make_join(
|
||||
destination, room_id, user_id
|
||||
)
|
||||
|
||||
pdu_dict = ret["event"]
|
||||
|
||||
logger.debug("Got response to make_join: %s", pdu_dict)
|
||||
|
||||
defer.returnValue(
|
||||
(destination, self.event_from_pdu_json(pdu_dict))
|
||||
)
|
||||
break
|
||||
except CodeMessageException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warn(
|
||||
"Failed to make_join via %s: %s",
|
||||
destination, e.message
|
||||
)
|
||||
|
||||
raise RuntimeError("Failed to send to any server.")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def send_join(self, destinations, pdu):
|
||||
for destination in destinations:
|
||||
try:
|
||||
time_now = self._clock.time_msec()
|
||||
_, content = yield self.transport_layer.send_join(
|
||||
destination=destination,
|
||||
room_id=pdu.room_id,
|
||||
event_id=pdu.event_id,
|
||||
content=pdu.get_pdu_json(time_now),
|
||||
)
|
||||
|
||||
logger.debug("Got content: %s", content)
|
||||
|
||||
state = [
|
||||
self.event_from_pdu_json(p, outlier=True)
|
||||
for p in content.get("state", [])
|
||||
]
|
||||
|
||||
auth_chain = [
|
||||
self.event_from_pdu_json(p, outlier=True)
|
||||
for p in content.get("auth_chain", [])
|
||||
]
|
||||
|
||||
signed_state = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, state, outlier=True
|
||||
)
|
||||
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, auth_chain, outlier=True
|
||||
)
|
||||
|
||||
auth_chain.sort(key=lambda e: e.depth)
|
||||
|
||||
defer.returnValue({
|
||||
"state": signed_state,
|
||||
"auth_chain": signed_auth,
|
||||
"origin": destination,
|
||||
})
|
||||
except CodeMessageException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warn(
|
||||
"Failed to send_join via %s: %s",
|
||||
destination, e.message
|
||||
)
|
||||
|
||||
raise RuntimeError("Failed to send to any server.")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def send_invite(self, destination, room_id, event_id, pdu):
|
||||
time_now = self._clock.time_msec()
|
||||
code, content = yield self.transport_layer.send_invite(
|
||||
destination=destination,
|
||||
room_id=room_id,
|
||||
event_id=event_id,
|
||||
content=pdu.get_pdu_json(time_now),
|
||||
)
|
||||
|
||||
pdu_dict = content["event"]
|
||||
|
||||
logger.debug("Got response to send_invite: %s", pdu_dict)
|
||||
|
||||
pdu = self.event_from_pdu_json(pdu_dict)
|
||||
|
||||
# Check signatures are correct.
|
||||
pdu = yield self._check_sigs_and_hash(pdu)
|
||||
|
||||
# FIXME: We should handle signature failures more gracefully.
|
||||
|
||||
defer.returnValue(pdu)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def query_auth(self, destination, room_id, event_id, local_auth):
|
||||
"""
|
||||
Params:
|
||||
destination (str)
|
||||
event_it (str)
|
||||
local_auth (list)
|
||||
"""
|
||||
time_now = self._clock.time_msec()
|
||||
|
||||
send_content = {
|
||||
"auth_chain": [e.get_pdu_json(time_now) for e in local_auth],
|
||||
}
|
||||
|
||||
code, content = yield self.transport_layer.send_query_auth(
|
||||
destination=destination,
|
||||
room_id=room_id,
|
||||
event_id=event_id,
|
||||
content=send_content,
|
||||
)
|
||||
|
||||
auth_chain = [
|
||||
self.event_from_pdu_json(e)
|
||||
for e in content["auth_chain"]
|
||||
]
|
||||
|
||||
signed_auth = yield self._check_sigs_and_hash_and_fetch(
|
||||
destination, auth_chain, outlier=True
|
||||
)
|
||||
|
||||
signed_auth.sort(key=lambda e: e.depth)
|
||||
|
||||
ret = {
|
||||
"auth_chain": signed_auth,
|
||||
"rejects": content.get("rejects", []),
|
||||
"missing": content.get("missing", []),
|
||||
}
|
||||
|
||||
defer.returnValue(ret)
|
||||
|
||||
    @defer.inlineCallbacks
    def get_missing_events(self, destination, room_id, earliest_events_ids,
                           latest_events, limit, min_depth):
        """Tries to fetch events we are missing. This is called when we receive
        an event without having received all of its ancestors.

        Args:
            destination (str)
            room_id (str)
            earliest_events_ids (list): List of event ids. Effectively the
                events we expected to receive, but haven't. `get_missing_events`
                should only return events that didn't happen before these.
            latest_events (list): List of events we have received that we don't
                have all previous events for.
            limit (int): Maximum number of events to return.
            min_depth (int): Minimum depth of events to return.
        """
        try:
            content = yield self.transport_layer.get_missing_events(
                destination=destination,
                room_id=room_id,
                earliest_events=earliest_events_ids,
                latest_events=[e.event_id for e in latest_events],
                limit=limit,
                min_depth=min_depth,
            )

            events = [
                self.event_from_pdu_json(e)
                for e in content.get("events", [])
            ]

            signed_events = yield self._check_sigs_and_hash_and_fetch(
                destination, events, outlier=True
            )

            have_gotten_all_from_destination = True
        except HttpResponseException as e:
            if not e.code == 400:
                raise

            # We are probably hitting an old server that doesn't support
            # get_missing_events
            signed_events = []
            have_gotten_all_from_destination = False

        if len(signed_events) >= limit:
            defer.returnValue(signed_events)

        servers = yield self.store.get_joined_hosts_for_room(room_id)

        servers = set(servers)
        servers.discard(self.server_name)

        failed_to_fetch = set()

        while len(signed_events) < limit:
            # Are we missing any?

            seen_events = set(earliest_events_ids)
            seen_events.update(e.event_id for e in signed_events)

            missing_events = {}
            for e in itertools.chain(latest_events, signed_events):
                if e.depth > min_depth:
                    missing_events.update({
                        e_id: e.depth for e_id, _ in e.prev_events
                        if e_id not in seen_events
                        and e_id not in failed_to_fetch
                    })

            if not missing_events:
                break

            have_seen = yield self.store.have_events(missing_events)

            for k in have_seen:
                missing_events.pop(k, None)

            if not missing_events:
                break

            # Okay, we haven't gotten everything yet. Let's get them.
            ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])

            if have_gotten_all_from_destination:
                servers.discard(destination)

            def random_server_list():
                srvs = list(servers)
                random.shuffle(srvs)
                return srvs

            deferreds = [
                self.get_pdu(
                    destinations=random_server_list(),
                    event_id=e_id,
                )
                for e_id, depth in ordered_missing[:limit - len(signed_events)]
            ]

            res = yield defer.DeferredList(deferreds, consumeErrors=True)
            for (result, val), (e_id, _) in zip(res, ordered_missing):
                if result:
                    signed_events.append(val)
                else:
                    failed_to_fetch.add(e_id)

        defer.returnValue(signed_events)

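    # Editor's sketch (not part of the original file): the gap-selection rule
    # used by the loop above, in isolation. Given events we hold, collect the
    # prev_event IDs still worth fetching: not yet seen, not already failed,
    # and referenced from an event deeper than min_depth.
    @staticmethod
    def _example_wanted_prev_events(events, seen, failed, min_depth):
        wanted = {}
        for e in events:
            if e.depth > min_depth:
                for e_id, _ in e.prev_events:
                    if e_id not in seen and e_id not in failed:
                        wanted[e_id] = e.depth
        return wanted
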
    def event_from_pdu_json(self, pdu_json, outlier=False):
        event = FrozenEvent(
            pdu_json
        )

        event.internal_metadata.outlier = outlier

        return event
490  synapse/federation/federation_server.py  Normal file
@@ -0,0 +1,490 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.internet import defer

from .federation_base import FederationBase
from .units import Transaction, Edu

from synapse.util.logutils import log_function
from synapse.util.logcontext import PreserveLoggingContext
from synapse.events import FrozenEvent
import synapse.metrics

from synapse.api.errors import FederationError, SynapseError

from synapse.crypto.event_signing import compute_event_signature

import logging


logger = logging.getLogger(__name__)

# synapse.federation.federation_server is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.server")

received_pdus_counter = metrics.register_counter("received_pdus")

received_edus_counter = metrics.register_counter("received_edus")

received_queries_counter = metrics.register_counter("received_queries", labels=["type"])


class FederationServer(FederationBase):
    def set_handler(self, handler):
        """Sets the handler that the replication layer will use to communicate
        receipt of new PDUs from other home servers. The required methods are
        documented on :py:class:`.ReplicationHandler`.
        """
        self.handler = handler

    def register_edu_handler(self, edu_type, handler):
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation Query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (callable): Invoked to handle incoming queries of this type

        handler is invoked as:
            result = handler(args)

        where 'args' is a dict mapping strings to strings of the query
        arguments. It should return a Deferred that will eventually yield an
        object to encode as JSON.
        """
        if query_type in self.query_handlers:
            raise KeyError(
                "Already have a Query handler for %s" % (query_type,)
            )

        self.query_handlers[query_type] = handler

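    # Editor's illustration (not part of the original file): registering a
    # hypothetical handler per the contract documented above -- it receives
    # the string-to-string args dict and returns a Deferred yielding a
    # JSON-encodable result.
    #
    #     def on_profile_query(args):
    #         # e.g. args == {"user_id": "@alice:example.org"}
    #         return defer.succeed({"displayname": "Alice"})
    #
    #     federation_server.register_query_handler("profile", on_profile_query)
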
    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        pdus = yield self.handler.on_backfill_request(
            origin, room_id, versions, limit
        )

        defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, transaction_data):
        transaction = Transaction(**transaction_data)

        received_pdus_counter.inc_by(len(transaction.pdus))

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
                del p["age"]

        pdu_list = [
            self.event_from_pdu_json(p) for p in transaction.pdus
        ]

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id
            )
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        with PreserveLoggingContext():
            results = []

            for pdu in pdu_list:
                d = self._handle_new_pdu(transaction.origin, pdu)

                try:
                    yield d
                    results.append({})
                except FederationError as e:
                    self.send_failure(e, transaction.origin)
                    results.append({"error": str(e)})
                except Exception as e:
                    results.append({"error": str(e)})
                    logger.exception("Failed to handle PDU")

        if hasattr(transaction, "edus"):
            for edu in [Edu(**x) for x in transaction.edus]:
                self.received_edu(
                    transaction.origin,
                    edu.edu_type,
                    edu.content
                )

        for failure in getattr(transaction, "pdu_failures", []):
            logger.info("Got failure %r", failure)

        logger.debug("Returning: %s", str(results))

        response = {
            "pdus": dict(zip(
                (p.event_id for p in pdu_list), results
            )),
        }

        yield self.transaction_actions.set_response(
            transaction,
            200, response
        )
        defer.returnValue((200, response))

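    # Editor's note (not part of the original file): the age bookkeeping above
    # turns the relative "age" a PDU arrives with (ms since the origin created
    # it) into an absolute local timestamp "age_ts", which stays meaningful
    # however long the PDU sits in our store. With hypothetical numbers:
    #
    #     now_ms = 1420000000000                  # our clock at receipt
    #     p = {"age": 3000}
    #     p["age_ts"] = now_ms - int(p.pop("age"))
    #     assert p["age_ts"] == 1419999997000
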
    def received_edu(self, origin, edu_type, content):
        received_edus_counter.inc()

        if edu_type in self.edu_handlers:
            self.edu_handlers[edu_type](origin, content)
        else:
            logger.warn("Received EDU of type %s with no handler", edu_type)

    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        if event_id:
            pdus = yield self.handler.get_state_for_pdu(
                origin, room_id, event_id,
            )
            auth_chain = yield self.store.get_auth_chain(
                [pdu.event_id for pdu in pdus]
            )

            for event in auth_chain:
                event.signatures.update(
                    compute_event_signature(
                        event,
                        self.hs.hostname,
                        self.hs.config.signing_key[0]
                    )
                )
        else:
            raise NotImplementedError("Specify an event")

        defer.returnValue((200, {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        }))

    @defer.inlineCallbacks
    @log_function
    def on_pdu_request(self, origin, event_id):
        pdu = yield self._get_persisted_pdu(origin, event_id)

        if pdu:
            defer.returnValue(
                (200, self._transaction_from_pdus([pdu]).get_dict())
            )
        else:
            defer.returnValue((404, ""))

    @defer.inlineCallbacks
    @log_function
    def on_pull_request(self, origin, versions):
        raise NotImplementedError("Pull transactions not implemented")

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.inc(query_type)

        if query_type in self.query_handlers:
            response = yield self.query_handlers[query_type](args)
            defer.returnValue((200, response))
        else:
            defer.returnValue(
                (404, "No handler for Query type '%s'" % (query_type,))
            )

    @defer.inlineCallbacks
    def on_make_join_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_join_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content):
        pdu = self.event_from_pdu_json(content)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))

    @defer.inlineCallbacks
    def on_send_join_request(self, origin, content):
        logger.debug("on_send_join_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain": [
                p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
            ],
        }))

    @defer.inlineCallbacks
    def on_event_auth(self, origin, room_id, event_id):
        time_now = self._clock.time_msec()
        auth_pdus = yield self.handler.on_event_auth(event_id)
        defer.returnValue((200, {
            "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
        }))

    @defer.inlineCallbacks
    def on_query_auth_request(self, origin, content, event_id):
        """
        Content is a dict with keys::
            auth_chain (list): A list of events that give the auth chain.
            missing (list): A list of event_ids indicating what the other
                side (`origin`) thinks we're missing.
            rejects (dict): A mapping from event_id to a 2-tuple of reason
                string and a proof (or None) of why the event was rejected.
                The keys of this dict give the list of events the `origin`
                has rejected.

        Args:
            origin (str)
            content (dict)
            event_id (str)

        Returns:
            Deferred: Results in `dict` with the same format as `content`
        """
        auth_chain = [
            self.event_from_pdu_json(e)
            for e in content["auth_chain"]
        ]

        signed_auth = yield self._check_sigs_and_hash_and_fetch(
            origin, auth_chain, outlier=True
        )

        ret = yield self.handler.on_query_auth(
            origin,
            event_id,
            signed_auth,
            content.get("rejects", []),
            content.get("missing", []),
        )

        time_now = self._clock.time_msec()
        send_content = {
            "auth_chain": [
                e.get_pdu_json(time_now)
                for e in ret["auth_chain"]
            ],
            "rejects": ret.get("rejects", []),
            "missing": ret.get("missing", []),
        }

        defer.returnValue(
            (200, send_content)
        )

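    # Editor's illustration (not part of the original file): the `content`
    # shape documented above, with hypothetical IDs.
    #
    #     content = {
    #         "auth_chain": [{"event_id": "$create:remote", "type": "m.room.create"}],
    #         "missing": ["$power_levels:local"],
    #         "rejects": {
    #             "$ban:remote": ("auth_error", None),  # (reason, optional proof)
    #         },
    #     }
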
    @defer.inlineCallbacks
    @log_function
    def on_get_missing_events(self, origin, room_id, earliest_events,
                              latest_events, limit, min_depth):
        missing_events = yield self.handler.on_get_missing_events(
            origin, room_id, earliest_events, latest_events, limit, min_depth
        )

        time_now = self._clock.time_msec()

        defer.returnValue({
            "events": [ev.get_pdu_json(time_now) for ev in missing_events],
        })

    @log_function
    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
        """ Get a PDU from the database with given origin and id.

        Returns:
            Deferred: Results in a `Pdu`.
        """
        return self.handler.get_persisted_pdu(
            origin, event_id, do_auth=do_auth
        )

    def _transaction_from_pdus(self, pdu_list):
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    @defer.inlineCallbacks
    @log_function
    def _handle_new_pdu(self, origin, pdu, get_missing=True):
        # We reprocess pdus when we have seen them only as outliers
        existing = yield self._get_persisted_pdu(
            origin, pdu.event_id, do_auth=False
        )

        # FIXME: Currently we fetch an event again when we already have it
        # if it has been marked as an outlier.

        already_seen = (
            existing and (
                not existing.internal_metadata.is_outlier()
                or pdu.internal_metadata.is_outlier()
            )
        )
        if already_seen:
            logger.debug("Already seen pdu %s", pdu.event_id)
            return

        # Check signature.
        try:
            pdu = yield self._check_sigs_and_hash(pdu)
        except SynapseError as e:
            raise FederationError(
                "ERROR",
                e.code,
                e.msg,
                affected=pdu.event_id,
            )

        state = None

        auth_chain = []

        have_seen = yield self.store.have_events(
            [ev for ev, _ in pdu.prev_events]
        )

        fetch_state = False

        # Get missing pdus if necessary.
        if not pdu.internal_metadata.is_outlier():
            # We only backfill backwards to the min depth.
            min_depth = yield self.handler.get_min_depth_for_context(
                pdu.room_id
            )

            logger.debug(
                "_handle_new_pdu min_depth for %s: %d",
                pdu.room_id, min_depth
            )

            prevs = {e_id for e_id, _ in pdu.prev_events}
            seen = set(have_seen.keys())

            if min_depth and pdu.depth < min_depth:
                # This is so that we don't notify the user about this
                # message, to work around the fact that some events will
                # reference really really old events we really don't want to
                # send to the clients.
                pdu.internal_metadata.outlier = True
            elif min_depth and pdu.depth > min_depth:
                if get_missing and prevs - seen:
                    latest_tuples = yield self.store.get_latest_events_in_room(
                        pdu.room_id
                    )

                    # We add the prev events that we have seen to the latest
                    # list to ensure the remote server doesn't give them to us
                    latest = set(e_id for e_id, _, _ in latest_tuples)
                    latest |= seen

                    missing_events = yield self.get_missing_events(
                        origin,
                        pdu.room_id,
                        earliest_events_ids=list(latest),
                        latest_events=[pdu],
                        limit=10,
                        min_depth=min_depth,
                    )

                    # We want to sort these by depth so we process them and
                    # tell clients about them in order.
                    missing_events.sort(key=lambda x: x.depth)

                    for e in missing_events:
                        yield self._handle_new_pdu(
                            origin,
                            e,
                            get_missing=False
                        )

                    have_seen = yield self.store.have_events(
                        [ev for ev, _ in pdu.prev_events]
                    )

            prevs = {e_id for e_id, _ in pdu.prev_events}
            seen = set(have_seen.keys())
            if prevs - seen:
                fetch_state = True

        if fetch_state:
            # We need to get the state at this event, since we haven't
            # processed all the prev events.
            logger.debug(
                "_handle_new_pdu getting state for %s",
                pdu.room_id
            )
            try:
                state, auth_chain = yield self.get_state_for_room(
                    origin, pdu.room_id, pdu.event_id,
                )
            except:
                logger.warn("Failed to get state for event: %s", pdu.event_id)

        yield self.handler.on_receive_pdu(
            origin,
            pdu,
            backfilled=False,
            state=state,
            auth_chain=auth_chain,
        )

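    # Editor's sketch (not part of the original file): the reprocessing rule
    # used at the top of _handle_new_pdu, in isolation. Assuming we already
    # hold a copy, a PDU is "already seen" only if the stored copy is a full
    # event, or the incoming copy is itself an outlier -- so a stored outlier
    # gets reprocessed when the full event finally arrives.
    @staticmethod
    def _example_already_seen(existing_is_outlier, incoming_is_outlier):
        return not existing_is_outlier or incoming_is_outlier
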
    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    def event_from_pdu_json(self, pdu_json, outlier=False):
        event = FrozenEvent(
            pdu_json
        )

        event.internal_metadata.outlier = outlier

        return event
synapse/federation/persistence.py
@@ -23,7 +23,8 @@ from twisted.internet import defer
 
 from synapse.util.logutils import log_function
 
-import json
+from syutil.jsonutil import encode_canonical_json
 
 import logging
 
@@ -70,7 +71,7 @@ class TransactionActions(object):
             transaction.transaction_id,
             transaction.origin,
             code,
-            json.dumps(response)
+            encode_canonical_json(response)
         )
 
     @defer.inlineCallbacks
@@ -100,5 +101,5 @@ class TransactionActions(object):
             transaction.transaction_id,
             transaction.destination,
             response_code,
-            json.dumps(response_dict)
+            encode_canonical_json(response_dict)
         )
synapse/federation/replication.py
@@ -17,23 +17,20 @@
 a given transport.
 """
 
-from twisted.internet import defer
+from .federation_client import FederationClient
+from .federation_server import FederationServer
 
-from .units import Transaction, Edu
+from .transaction_queue import TransactionQueue
 
 from .persistence import TransactionActions
 
-from synapse.util.logutils import log_function
-from synapse.util.logcontext import PreserveLoggingContext
-from synapse.events import FrozenEvent
-
 import logging
 
 
 logger = logging.getLogger(__name__)
 
 
-class ReplicationLayer(object):
+class ReplicationLayer(FederationClient, FederationServer):
     """This layer is responsible for replicating with remote home servers over
     the given transport. I.e., does the sending and receiving of PDUs to
     remote home servers.
@@ -54,922 +51,28 @@
     def __init__(self, hs, transport_layer):
         self.server_name = hs.hostname
 
         self.keyring = hs.get_keyring()
 
         self.transport_layer = transport_layer
         self.transport_layer.register_received_handler(self)
         self.transport_layer.register_request_handler(self)
 
-        self.store = hs.get_datastore()
-        # self.pdu_actions = PduActions(self.store)
-        self.transaction_actions = TransactionActions(self.store)
+        self.federation_client = self
 
-        self._transaction_queue = _TransactionQueue(
-            hs, self.transaction_actions, transport_layer
-        )
+        self.store = hs.get_datastore()
 
         self.handler = None
         self.edu_handlers = {}
         self.query_handlers = {}
 
-        self._order = 0
-
         self._clock = hs.get_clock()
 
-        self.event_builder_factory = hs.get_event_builder_factory()
+        self.transaction_actions = TransactionActions(self.store)
+        self._transaction_queue = TransactionQueue(hs, transport_layer)
 
-    def set_handler(self, handler):
-        """Sets the handler that the replication layer will use to communicate
-        receipt of new PDUs from other home servers. The required methods are
-        documented on :py:class:`.ReplicationHandler`.
-        """
-        self.handler = handler
+        self._order = 0
 
-    def register_edu_handler(self, edu_type, handler):
-        if edu_type in self.edu_handlers:
-            raise KeyError("Already have an EDU handler for %s" % (edu_type,))
-
-        self.edu_handlers[edu_type] = handler
-
-    def register_query_handler(self, query_type, handler):
-        """Sets the handler callable that will be used to handle an incoming
-        federation Query of the given type.
-
-        Args:
-            query_type (str): Category name of the query, which should match
-                the string used by make_query.
-            handler (callable): Invoked to handle incoming queries of this type
-
-        handler is invoked as:
-            result = handler(args)
-
-        where 'args' is a dict mapping strings to strings of the query
-        arguments. It should return a Deferred that will eventually yield an
-        object to encode as JSON.
-        """
-        if query_type in self.query_handlers:
-            raise KeyError(
-                "Already have a Query handler for %s" % (query_type,)
-            )
-
-        self.query_handlers[query_type] = handler
-
-    @log_function
-    def send_pdu(self, pdu, destinations):
-        """Informs the replication layer about a new PDU generated within the
-        home server that should be transmitted to others.
-
-        TODO: Figure out when we should actually resolve the deferred.
-
-        Args:
-            pdu (Pdu): The new Pdu.
-
-        Returns:
-            Deferred: Completes when we have successfully processed the PDU
-            and replicated it to any interested remote home servers.
-        """
-        order = self._order
-        self._order += 1
-
-        logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)
-
-        # TODO, add errback, etc.
-        self._transaction_queue.enqueue_pdu(pdu, destinations, order)
-
-        logger.debug(
-            "[%s] transaction_layer.enqueue_pdu... done",
-            pdu.event_id
-        )
-
-    @log_function
-    def send_edu(self, destination, edu_type, content):
-        edu = Edu(
-            origin=self.server_name,
-            destination=destination,
-            edu_type=edu_type,
-            content=content,
-        )
-
-        # TODO, add errback, etc.
-        self._transaction_queue.enqueue_edu(edu)
-        return defer.succeed(None)
-
-    @log_function
-    def send_failure(self, failure, destination):
-        self._transaction_queue.enqueue_failure(failure, destination)
-        return defer.succeed(None)
-
-    @log_function
-    def make_query(self, destination, query_type, args,
-                   retry_on_dns_fail=True):
-        """Sends a federation Query to a remote homeserver of the given type
-        and arguments.
-
-        Args:
-            destination (str): Domain name of the remote homeserver
-            query_type (str): Category of the query type; should match the
-                handler name used in register_query_handler().
-            args (dict): Mapping of strings to strings containing the details
-                of the query request.
-
-        Returns:
-            a Deferred which will eventually yield a JSON object from the
-            response
-        """
-        return self.transport_layer.make_query(
-            destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
-        )
-
-    @defer.inlineCallbacks
-    @log_function
-    def backfill(self, dest, context, limit, extremities):
-        """Requests some more historic PDUs for the given context from the
-        given destination server.
-
-        Args:
-            dest (str): The remote home server to ask.
-            context (str): The context to backfill.
-            limit (int): The maximum number of PDUs to return.
-            extremities (list): List of PDU id and origins of the first pdus
-                we have seen from the context
-
-        Returns:
-            Deferred: Results in the received PDUs.
-        """
-        logger.debug("backfill extrem=%s", extremities)
-
-        # If there are no extremities then we've (probably) reached the start.
-        if not extremities:
-            return
-
-        transaction_data = yield self.transport_layer.backfill(
-            dest, context, extremities, limit)
-
-        logger.debug("backfill transaction_data=%s", repr(transaction_data))
-
-        transaction = Transaction(**transaction_data)
-
-        pdus = [
-            self.event_from_pdu_json(p, outlier=False)
-            for p in transaction.pdus
-        ]
-        for pdu in pdus:
-            yield self._handle_new_pdu(dest, pdu, backfilled=True)
-
-        defer.returnValue(pdus)
-
-    @defer.inlineCallbacks
-    @log_function
-    def get_pdu(self, destination, event_id, outlier=False):
-        """Requests the PDU with given origin and ID from the remote home
-        server.
-
-        This will persist the PDU locally upon receipt.
-
-        Args:
-            destination (str): Which home server to query
-            pdu_origin (str): The home server that originally sent the pdu.
-            event_id (str)
-            outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
-                it's from an arbitrary point in the context as opposed to part
-                of the current block of PDUs. Defaults to `False`
-
-        Returns:
-            Deferred: Results in the requested PDU.
-        """
-
-        transaction_data = yield self.transport_layer.get_event(
-            destination, event_id
-        )
-
-        transaction = Transaction(**transaction_data)
-
-        pdu_list = [
-            self.event_from_pdu_json(p, outlier=outlier)
-            for p in transaction.pdus
-        ]
-
-        pdu = None
-        if pdu_list:
-            pdu = pdu_list[0]
-            yield self._handle_new_pdu(destination, pdu)
-
-        defer.returnValue(pdu)
-
-    @defer.inlineCallbacks
-    @log_function
-    def get_state_for_context(self, destination, context, event_id):
-        """Requests all of the `current` state PDUs for a given context from
-        a remote home server.
-
-        Args:
-            destination (str): The remote homeserver to query for the state.
-            context (str): The context we're interested in.
-            event_id (str): The id of the event we want the state at.
-
-        Returns:
-            Deferred: Results in a list of PDUs.
-        """
-
-        result = yield self.transport_layer.get_context_state(
-            destination,
-            context,
-            event_id=event_id,
-        )
-
-        pdus = [
-            self.event_from_pdu_json(p, outlier=True) for p in result["pdus"]
-        ]
-
-        auth_chain = [
-            self.event_from_pdu_json(p, outlier=True)
-            for p in result.get("auth_chain", [])
-        ]
-
-        defer.returnValue((pdus, auth_chain))
-
-    @defer.inlineCallbacks
-    @log_function
-    def get_event_auth(self, destination, context, event_id):
-        res = yield self.transport_layer.get_event_auth(
-            destination, context, event_id,
-        )
-
-        auth_chain = [
-            self.event_from_pdu_json(p, outlier=True)
-            for p in res["auth_chain"]
-        ]
-
-        auth_chain.sort(key=lambda e: e.depth)
-
-        defer.returnValue(auth_chain)
-
-    @defer.inlineCallbacks
-    @log_function
-    def on_backfill_request(self, origin, context, versions, limit):
-        pdus = yield self.handler.on_backfill_request(
-            origin, context, versions, limit
-        )
-
-        defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
-
-    @defer.inlineCallbacks
-    @log_function
-    def on_incoming_transaction(self, transaction_data):
-        transaction = Transaction(**transaction_data)
-
-        for p in transaction.pdus:
-            if "unsigned" in p:
-                unsigned = p["unsigned"]
-                if "age" in unsigned:
-                    p["age"] = unsigned["age"]
-            if "age" in p:
-                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
-                del p["age"]
-
-        pdu_list = [
-            self.event_from_pdu_json(p) for p in transaction.pdus
-        ]
-
-        logger.debug("[%s] Got transaction", transaction.transaction_id)
-
-        response = yield self.transaction_actions.have_responded(transaction)
-
-        if response:
-            logger.debug("[%s] We've already responded to this request",
-                         transaction.transaction_id)
-            defer.returnValue(response)
-            return
-
-        logger.debug("[%s] Transaction is new", transaction.transaction_id)
-
-        with PreserveLoggingContext():
-            dl = []
-            for pdu in pdu_list:
-                dl.append(self._handle_new_pdu(transaction.origin, pdu))
-
-            if hasattr(transaction, "edus"):
-                for edu in [Edu(**x) for x in transaction.edus]:
-                    self.received_edu(
-                        transaction.origin,
-                        edu.edu_type,
-                        edu.content
-                    )
-
-            results = yield defer.DeferredList(dl)
-
-        ret = []
-        for r in results:
-            if r[0]:
-                ret.append({})
-            else:
-                logger.exception(r[1])
-                ret.append({"error": str(r[1])})
-
-        logger.debug("Returning: %s", str(ret))
-
-        yield self.transaction_actions.set_response(
-            transaction,
-            200, response
-        )
-        defer.returnValue((200, response))
-
-    def received_edu(self, origin, edu_type, content):
-        if edu_type in self.edu_handlers:
-            self.edu_handlers[edu_type](origin, content)
-        else:
-            logger.warn("Received EDU of type %s with no handler", edu_type)
-
-    @defer.inlineCallbacks
-    @log_function
-    def on_context_state_request(self, origin, context, event_id):
-        if event_id:
-            pdus = yield self.handler.get_state_for_pdu(
-                origin,
-                context,
-                event_id,
-            )
-            auth_chain = yield self.store.get_auth_chain(
-                [pdu.event_id for pdu in pdus]
-            )
-        else:
-            raise NotImplementedError("Specify an event")
-
-        defer.returnValue((200, {
-            "pdus": [pdu.get_pdu_json() for pdu in pdus],
-            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
-        }))
-
-    @defer.inlineCallbacks
-    @log_function
-    def on_pdu_request(self, origin, event_id):
-        pdu = yield self._get_persisted_pdu(origin, event_id)
-
-        if pdu:
-            defer.returnValue(
-                (200, self._transaction_from_pdus([pdu]).get_dict())
-            )
-        else:
-            defer.returnValue((404, ""))
-
-    @defer.inlineCallbacks
-    @log_function
-    def on_pull_request(self, origin, versions):
-        raise NotImplementedError("Pull transactions not implemented")
-
-    @defer.inlineCallbacks
-    def on_query_request(self, query_type, args):
-        if query_type in self.query_handlers:
-            response = yield self.query_handlers[query_type](args)
-            defer.returnValue((200, response))
-        else:
-            defer.returnValue(
-                (404, "No handler for Query type '%s'" % (query_type,))
-            )
-
-    @defer.inlineCallbacks
-    def on_make_join_request(self, context, user_id):
-        pdu = yield self.handler.on_make_join_request(context, user_id)
-        time_now = self._clock.time_msec()
-        defer.returnValue({
-            "event": pdu.get_pdu_json(time_now),
-        })
-
-    @defer.inlineCallbacks
-    def on_invite_request(self, origin, content):
-        pdu = self.event_from_pdu_json(content)
-        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
-        time_now = self._clock.time_msec()
-        defer.returnValue(
-            (
-                200,
-                {
-                    "event": ret_pdu.get_pdu_json(time_now),
-                }
-            )
-        )
-
-    @defer.inlineCallbacks
-    def on_send_join_request(self, origin, content):
-        logger.debug("on_send_join_request: content: %s", content)
-        pdu = self.event_from_pdu_json(content)
-        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
-        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
-        time_now = self._clock.time_msec()
-        defer.returnValue((200, {
-            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
-            "auth_chain": [
-                p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
-            ],
-        }))
-
-    @defer.inlineCallbacks
-    def on_event_auth(self, origin, context, event_id):
-        time_now = self._clock.time_msec()
-        auth_pdus = yield self.handler.on_event_auth(event_id)
-        defer.returnValue(
-            (
-                200,
-                {
-                    "auth_chain": [
-                        a.get_pdu_json(time_now) for a in auth_pdus
-                    ],
-                }
-            )
-        )
-
-    @defer.inlineCallbacks
-    def make_join(self, destination, context, user_id):
-        ret = yield self.transport_layer.make_join(
-            destination=destination,
-            context=context,
-            user_id=user_id,
-        )
-
-        pdu_dict = ret["event"]
-
-        logger.debug("Got response to make_join: %s", pdu_dict)
-
-        defer.returnValue(self.event_from_pdu_json(pdu_dict))
-
-    @defer.inlineCallbacks
-    def send_join(self, destination, pdu):
-        time_now = self._clock.time_msec()
-        _, content = yield self.transport_layer.send_join(
-            destination,
-            pdu.room_id,
-            pdu.event_id,
-            pdu.get_pdu_json(time_now),
-        )
-
-        logger.debug("Got content: %s", content)
-
-        state = [
-            self.event_from_pdu_json(p, outlier=True)
-            for p in content.get("state", [])
-        ]
-
-        # FIXME: We probably want to do something with the auth_chain given
-        # to us
-
-        auth_chain = [
-            self.event_from_pdu_json(p, outlier=True)
-            for p in content.get("auth_chain", [])
-        ]
-
-        auth_chain.sort(key=lambda e: e.depth)
-
-        defer.returnValue({
-            "state": state,
-            "auth_chain": auth_chain,
-        })
-
-    @defer.inlineCallbacks
-    def send_invite(self, destination, context, event_id, pdu):
-        time_now = self._clock.time_msec()
-        code, content = yield self.transport_layer.send_invite(
-            destination=destination,
-            context=context,
-            event_id=event_id,
-            content=pdu.get_pdu_json(time_now),
-        )
-
-        pdu_dict = content["event"]
-
-        logger.debug("Got response to send_invite: %s", pdu_dict)
-
-        defer.returnValue(self.event_from_pdu_json(pdu_dict))
-
-    @log_function
-    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
-        """ Get a PDU from the database with given origin and id.
-
-        Returns:
-            Deferred: Results in a `Pdu`.
-        """
-        return self.handler.get_persisted_pdu(
-            origin, event_id, do_auth=do_auth
-        )
-
-    def _transaction_from_pdus(self, pdu_list):
-        """Returns a new Transaction containing the given PDUs suitable for
-        transmission.
-        """
-        time_now = self._clock.time_msec()
-        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
-        return Transaction(
-            origin=self.server_name,
-            pdus=pdus,
-            origin_server_ts=int(time_now),
-            destination=None,
-        )
-
-    @defer.inlineCallbacks
-    @log_function
-    def _handle_new_pdu(self, origin, pdu, backfilled=False):
-        # We reprocess pdus when we have seen them only as outliers
-        existing = yield self._get_persisted_pdu(
-            origin, pdu.event_id, do_auth=False
-        )
-
-        already_seen = (
-            existing and (
-                not existing.internal_metadata.is_outlier()
-                or pdu.internal_metadata.is_outlier()
-            )
-        )
-        if already_seen:
-            logger.debug("Already seen pdu %s", pdu.event_id)
-            defer.returnValue({})
-            return
-
-        state = None
-
-        auth_chain = []
-
-        # We need to make sure we have all the auth events.
-        # for e_id, _ in pdu.auth_events:
-        #     exists = yield self._get_persisted_pdu(
-        #         origin,
-        #         e_id,
-        #         do_auth=False
-        #     )
-        #
-        #     if not exists:
-        #         try:
-        #             logger.debug(
-        #                 "_handle_new_pdu fetch missing auth event %s from %s",
-        #                 e_id,
-        #                 origin,
-        #             )
-        #
-        #             yield self.get_pdu(
-        #                 origin,
-        #                 event_id=e_id,
-        #                 outlier=True,
-        #             )
-        #
-        #             logger.debug("Processed pdu %s", e_id)
-        #         except:
-        #             logger.warn(
-        #                 "Failed to get auth event %s from %s",
-        #                 e_id,
-        #                 origin
-        #             )
-
-        # Get missing pdus if necessary.
-        if not pdu.internal_metadata.is_outlier():
-            # We only backfill backwards to the min depth.
-            min_depth = yield self.handler.get_min_depth_for_context(
-                pdu.room_id
-            )
-
-            logger.debug(
-                "_handle_new_pdu min_depth for %s: %d",
-                pdu.room_id, min_depth
-            )
-
-            if min_depth and pdu.depth > min_depth:
-                for event_id, hashes in pdu.prev_events:
-                    exists = yield self._get_persisted_pdu(
-                        origin,
-                        event_id,
-                        do_auth=False
-                    )
-
-                    if not exists:
-                        logger.debug(
-                            "_handle_new_pdu requesting pdu %s",
-                            event_id
-                        )
-
-                        try:
-                            yield self.get_pdu(
-                                origin,
-                                event_id=event_id,
-                            )
-                            logger.debug("Processed pdu %s", event_id)
-                        except:
-                            # TODO(erikj): Do some more intelligent retries.
-                            logger.exception("Failed to get PDU")
-            else:
-                # We need to get the state at this event, since we have reached
-                # a backward extremity edge.
-                logger.debug(
-                    "_handle_new_pdu getting state for %s",
-                    pdu.room_id
-                )
-                state, auth_chain = yield self.get_state_for_context(
-                    origin, pdu.room_id, pdu.event_id,
-                )
-
-        if not backfilled:
-            ret = yield self.handler.on_receive_pdu(
-                origin,
-                pdu,
-                backfilled=backfilled,
-                state=state,
-                auth_chain=auth_chain,
-            )
-        else:
-            ret = None
-
-        # yield self.pdu_actions.mark_as_processed(pdu)
-
-        defer.returnValue(ret)
+        self.hs = hs
 
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name
-
-    def event_from_pdu_json(self, pdu_json, outlier=False):
-        event = FrozenEvent(
-            pdu_json
-        )
-
-        event.internal_metadata.outlier = outlier
-
-        return event
-
-
-class _TransactionQueue(object):
-    """This class makes sure we only have one transaction in flight at
-    a time for a given destination.
-
-    It batches pending PDUs into single transactions.
-    """
-
-    def __init__(self, hs, transaction_actions, transport_layer):
-        self.server_name = hs.hostname
-        self.transaction_actions = transaction_actions
-        self.transport_layer = transport_layer
-
-        self._clock = hs.get_clock()
-        self.store = hs.get_datastore()
-
-        # Is a mapping from destinations -> deferreds. Used to keep track
-        # of which destinations have transactions in flight and when they are
-        # done
-        self.pending_transactions = {}
-
-        # Is a mapping from destination -> list of
-        # tuple(pending pdus, deferred, order)
-        self.pending_pdus_by_dest = {}
-        # destination -> list of tuple(edu, deferred)
-        self.pending_edus_by_dest = {}
-
-        # destination -> list of tuple(failure, deferred)
-        self.pending_failures_by_dest = {}
-
-        # HACK to get unique tx id
-        self._next_txn_id = int(self._clock.time_msec())
-
-    @defer.inlineCallbacks
-    @log_function
-    def enqueue_pdu(self, pdu, destinations, order):
-        # We loop through all destinations to see whether we already have
-        # a transaction in progress. If we do, stick it in the pending_pdus
-        # table and we'll get back to it later.
-
-        destinations = set(destinations)
-        destinations.discard(self.server_name)
-        destinations.discard("localhost")
-
-        logger.debug("Sending to: %s", str(destinations))
-
-        if not destinations:
-            return
-
-        deferreds = []
-
-        for destination in destinations:
-            deferred = defer.Deferred()
-            self.pending_pdus_by_dest.setdefault(destination, []).append(
-                (pdu, deferred, order)
-            )
-
-            def eb(failure):
-                if not deferred.called:
-                    deferred.errback(failure)
-                else:
-                    logger.warn("Failed to send pdu", failure)
-
-            with PreserveLoggingContext():
-                self._attempt_new_transaction(destination).addErrback(eb)
-
-            deferreds.append(deferred)
-
-        yield defer.DeferredList(deferreds)
-
-    # NO inlineCallbacks
-    def enqueue_edu(self, edu):
-        destination = edu.destination
-
-        if destination == self.server_name:
-            return
-
-        deferred = defer.Deferred()
-        self.pending_edus_by_dest.setdefault(destination, []).append(
-            (edu, deferred)
-        )
-
-        def eb(failure):
-            if not deferred.called:
-                deferred.errback(failure)
-            else:
-                logger.warn("Failed to send edu", failure)
-
-        with PreserveLoggingContext():
-            self._attempt_new_transaction(destination).addErrback(eb)
-
-        return deferred
-
-    @defer.inlineCallbacks
-    def enqueue_failure(self, failure, destination):
-        deferred = defer.Deferred()
-
-        self.pending_failures_by_dest.setdefault(
-            destination, []
-        ).append(
-            (failure, deferred)
-        )
-
-        yield deferred
-
-    @defer.inlineCallbacks
-    @log_function
-    def _attempt_new_transaction(self, destination):
-
-        (retry_last_ts, retry_interval) = (0, 0)
-        retry_timings = yield self.store.get_destination_retry_timings(
-            destination
-        )
-        if retry_timings:
-            (retry_last_ts, retry_interval) = (
-                retry_timings.retry_last_ts, retry_timings.retry_interval
-            )
-            if retry_last_ts + retry_interval > int(self._clock.time_msec()):
-                logger.info(
-                    "TX [%s] not ready for retry yet - "
-                    "dropping transaction for now",
-                    destination,
-                )
-                return
-            else:
-                logger.info("TX [%s] is ready for retry", destination)
-
-        logger.info("TX [%s] _attempt_new_transaction", destination)
-
-        if destination in self.pending_transactions:
-            # XXX: pending_transactions can get stuck on by a never-ending
-            # request at which point pending_pdus_by_dest just keeps growing.
-            # we need application-layer timeouts of some flavour for these
-            # requests
-            return
-
-        # list of (pending_pdu, deferred, order)
-        pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
-        pending_edus = self.pending_edus_by_dest.pop(destination, [])
-        pending_failures = self.pending_failures_by_dest.pop(destination, [])
-
-        if pending_pdus:
-            logger.info("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
-                        destination, len(pending_pdus))
-
-        if not pending_pdus and not pending_edus and not pending_failures:
-            return
-
-        logger.debug(
-            "TX [%s] Attempting new transaction "
-            "(pdus: %d, edus: %d, failures: %d)",
-            destination,
-            len(pending_pdus),
-            len(pending_edus),
-            len(pending_failures)
-        )
-
-        # Sort based on the order field
-        pending_pdus.sort(key=lambda t: t[2])
-
-        pdus = [x[0] for x in pending_pdus]
-        edus = [x[0] for x in pending_edus]
-        failures = [x[0].get_dict() for x in pending_failures]
-        deferreds = [
-            x[1]
-            for x in pending_pdus + pending_edus + pending_failures
-        ]
-
-        try:
-            self.pending_transactions[destination] = 1
-
-            logger.debug("TX [%s] Persisting transaction...", destination)
-
-            transaction = Transaction.create_new(
-                origin_server_ts=int(self._clock.time_msec()),
-                transaction_id=str(self._next_txn_id),
-                origin=self.server_name,
-                destination=destination,
-                pdus=pdus,
-                edus=edus,
-                pdu_failures=failures,
-            )
-
-            self._next_txn_id += 1
-
-            yield self.transaction_actions.prepare_to_send(transaction)
-
-            logger.debug("TX [%s] Persisted transaction", destination)
-            logger.info(
-                "TX [%s] Sending transaction [%s]",
-                destination,
-                transaction.transaction_id,
-            )
-
-            # Actually send the transaction
-
-            # FIXME (erikj): This is a bit of a hack to make the Pdu age
-            # keys work
-            def json_data_cb():
-                data = transaction.get_dict()
-                now = int(self._clock.time_msec())
-                if "pdus" in data:
-                    for p in data["pdus"]:
-                        if "age_ts" in p:
-                            unsigned = p.setdefault("unsigned", {})
-                            unsigned["age"] = now - int(p["age_ts"])
-                            del p["age_ts"]
-                return data
-
-            code, response = yield self.transport_layer.send_transaction(
-                transaction, json_data_cb
-            )
-
-            logger.info("TX [%s] got %d response", destination, code)
-
-            logger.debug("TX [%s] Sent transaction", destination)
-            logger.debug("TX [%s] Marking as delivered...", destination)
-
-            yield self.transaction_actions.delivered(
-                transaction, code, response
-            )
-
-            logger.debug("TX [%s] Marked as delivered", destination)
-            logger.debug("TX [%s] Yielding to callbacks...", destination)
-
-            for deferred in deferreds:
-                if code == 200:
-                    if retry_last_ts:
-                        # this host is alive! reset retry schedule
-                        yield self.store.set_destination_retry_timings(
-                            destination, 0, 0
-                        )
-                    deferred.callback(None)
-                else:
-                    self.set_retrying(destination, retry_interval)
-                    deferred.errback(RuntimeError("Got status %d" % code))
-
-                # Ensures we don't continue until all callbacks on that
-                # deferred have fired
-                try:
-                    yield deferred
-                except:
-                    pass
-
-            logger.debug("TX [%s] Yielded to callbacks", destination)
-
-        except Exception as e:
-            # We capture this here as there is nothing that actually listens
-            # for this function's deferred to finish.
-            logger.warn(
-                "TX [%s] Problem in _attempt_transaction: %s",
-                destination,
-                e,
-            )
-
-            self.set_retrying(destination, retry_interval)
-
-            for deferred in deferreds:
-                if not deferred.called:
-                    deferred.errback(e)
-
-        finally:
-            # We want to be *very* sure we delete this after we stop processing
-            self.pending_transactions.pop(destination, None)
-
-            # Check to see if there is anything else to send.
-            self._attempt_new_transaction(destination)
-
-    @defer.inlineCallbacks
-    def set_retrying(self, destination, retry_interval):
-        # track that this destination is having problems and we should
-        # give it a chance to recover before trying it again
-
-        if retry_interval:
-            retry_interval *= 2
-            # plateau at hourly retries for now
-            if retry_interval >= 60 * 60 * 1000:
-                retry_interval = 60 * 60 * 1000
-        else:
-            retry_interval = 2000  # try again at first after 2 seconds
-
-        yield self.store.set_destination_retry_timings(
-            destination,
-            int(self._clock.time_msec()),
-            retry_interval
-        )
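# Editor's note (not part of the diff): set_retrying above implements simple
# exponential backoff -- 2 s after the first failure, doubling per consecutive
# failure, and plateauing at one hour:
#
#     interval = 0
#     for _ in range(12):
#         interval = min(interval * 2, 60 * 60 * 1000) if interval else 2000
#     # 2000, 4000, 8000, ..., 2048000, then capped at 3600000 ms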
376  synapse/federation/transaction_queue.py  Normal file
@@ -0,0 +1,376 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.internet import defer

from .persistence import TransactionActions
from .units import Transaction

from synapse.api.errors import HttpResponseException
from synapse.util.logutils import log_function
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.retryutils import (
    get_retry_limiter, NotRetryingDestination,
)
import synapse.metrics

import logging


logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)


class TransactionQueue(object):
    """This class makes sure we only have one transaction in flight at
    a time for a given destination.

    It batches pending PDUs into single transactions.
    """

    def __init__(self, hs, transport_layer):
        self.server_name = hs.hostname

        self.store = hs.get_datastore()
        self.transaction_actions = TransactionActions(self.store)

        self.transport_layer = transport_layer

        self._clock = hs.get_clock()

        # Is a mapping from destinations -> deferreds. Used to keep track
        # of which destinations have transactions in flight and when they are
        # done
        self.pending_transactions = {}

        metrics.register_callback(
            "pending_destinations",
            lambda: len(self.pending_transactions),
        )

        # Is a mapping from destination -> list of
        # tuple(pending pdus, deferred, order)
        self.pending_pdus_by_dest = pdus = {}
        # destination -> list of tuple(edu, deferred)
        self.pending_edus_by_dest = edus = {}

        metrics.register_callback(
            "pending_pdus",
            lambda: sum(map(len, pdus.values())),
        )
        metrics.register_callback(
            "pending_edus",
            lambda: sum(map(len, edus.values())),
        )

        # destination -> list of tuple(failure, deferred)
        self.pending_failures_by_dest = {}

        # HACK to get unique tx id
        self._next_txn_id = int(self._clock.time_msec())

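    # Editor's note (not part of the original file): the callbacks registered
    # above report total queue depth across destinations; e.g. with queues
    # {"a.example": [pdu1, pdu2], "b.example": [pdu3]} the "pending_pdus"
    # metric evaluates sum(map(len, pdus.values())) == 3.
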
    def can_send_to(self, destination):
        """Can we send messages to the given server?

        We can't send messages to ourselves. If we are running on localhost
        then we can only federate with other servers running on localhost.
        Otherwise we only federate with servers on a public domain.

        Args:
            destination(str): The server we are possibly trying to send to.
        Returns:
            bool: True if we can send to the server.
        """

        if destination == self.server_name:
            return False
        if self.server_name.startswith("localhost"):
            return destination.startswith("localhost")
        else:
            return not destination.startswith("localhost")

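    # Editor's note (not part of the original file): the rule above in table
    # form, with hypothetical server names.
    #
    #     server_name         destination         can_send_to
    #     localhost:8448  ->  localhost:8449  ->  True
    #     localhost:8448  ->  matrix.org      ->  False
    #     example.org     ->  localhost:8448  ->  False
    #     example.org     ->  matrix.org      ->  True
    #     example.org     ->  example.org     ->  False  (ourselves)
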
    @defer.inlineCallbacks
    @log_function
    def enqueue_pdu(self, pdu, destinations, order):
        # We loop through all destinations to see whether we already have
        # a transaction in progress. If we do, stick it in the pending_pdus
        # table and we'll get back to it later.

        destinations = set(destinations)
        destinations = set(
            dest for dest in destinations if self.can_send_to(dest)
        )

        logger.debug("Sending to: %s", str(destinations))

        if not destinations:
            return

        deferreds = []

        for destination in destinations:
            deferred = defer.Deferred()
            self.pending_pdus_by_dest.setdefault(destination, []).append(
                (pdu, deferred, order)
            )

            def chain(failure):
                if not deferred.called:
                    deferred.errback(failure)

            def log_failure(f):
                logger.warn("Failed to send pdu to %s: %s", destination, f.value)

            deferred.addErrback(log_failure)

            with PreserveLoggingContext():
                self._attempt_new_transaction(destination).addErrback(chain)

            deferreds.append(deferred)

        yield defer.DeferredList(deferreds, consumeErrors=True)

    # NO inlineCallbacks
    def enqueue_edu(self, edu):
        destination = edu.destination

        if not self.can_send_to(destination):
            return

        deferred = defer.Deferred()
        self.pending_edus_by_dest.setdefault(destination, []).append(
            (edu, deferred)
        )

        def chain(failure):
            if not deferred.called:
                deferred.errback(failure)

        def log_failure(f):
            logger.warn("Failed to send edu to %s: %s", destination, f.value)

        deferred.addErrback(log_failure)

        with PreserveLoggingContext():
            self._attempt_new_transaction(destination).addErrback(chain)

        return deferred

    @defer.inlineCallbacks
    def enqueue_failure(self, failure, destination):
        if destination == self.server_name or destination == "localhost":
            return

        deferred = defer.Deferred()

        if not self.can_send_to(destination):
            return

        self.pending_failures_by_dest.setdefault(
            destination, []
        ).append(
            (failure, deferred)
        )

        def chain(f):
            if not deferred.called:
                deferred.errback(f)

        def log_failure(f):
            logger.warn("Failed to send failure to %s: %s", destination, f.value)

        deferred.addErrback(log_failure)

        with PreserveLoggingContext():
            self._attempt_new_transaction(destination).addErrback(chain)

        yield deferred

    @defer.inlineCallbacks
    @log_function
    def _attempt_new_transaction(self, destination):
        if destination in self.pending_transactions:
            # XXX: pending_transactions can get stuck on by a never-ending
            # request at which point pending_pdus_by_dest just keeps growing.
            # we need application-layer timeouts of some flavour for these
            # requests
            logger.info(
                "TX [%s] Transaction already in progress",
                destination
            )
            return

        logger.info("TX [%s] _attempt_new_transaction", destination)

        # list of (pending_pdu, deferred, order)
        pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
        pending_edus = self.pending_edus_by_dest.pop(destination, [])
        pending_failures = self.pending_failures_by_dest.pop(destination, [])

        if pending_pdus:
            logger.info("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                        destination, len(pending_pdus))

        if not pending_pdus and not pending_edus and not pending_failures:
            logger.info("TX [%s] Nothing to send", destination)
            return

        # Sort based on the order field
        pending_pdus.sort(key=lambda t: t[2])

        pdus = [x[0] for x in pending_pdus]
        edus = [x[0] for x in pending_edus]
        failures = [x[0].get_dict() for x in pending_failures]
        deferreds = [
            x[1]
            for x in pending_pdus + pending_edus + pending_failures
        ]

        try:
            self.pending_transactions[destination] = 1

            limiter = yield get_retry_limiter(
                destination,
                self._clock,
                self.store,
            )

            logger.debug(
                "TX [%s] Attempting new transaction"
                " (pdus: %d, edus: %d, failures: %d)",
                destination,
                len(pending_pdus),
                len(pending_edus),
                len(pending_failures)
            )

            logger.debug("TX [%s] Persisting transaction...", destination)

            transaction = Transaction.create_new(
                origin_server_ts=int(self._clock.time_msec()),
                transaction_id=str(self._next_txn_id),
                origin=self.server_name,
                destination=destination,
                pdus=pdus,
                edus=edus,
                pdu_failures=failures,
            )

            self._next_txn_id += 1

            yield self.transaction_actions.prepare_to_send(transaction)

            logger.debug("TX [%s] Persisted transaction", destination)
            logger.info(
                "TX [%s] Sending transaction [%s]",
                destination,
                transaction.transaction_id,
            )

            with limiter:
                # Actually send the transaction

                # FIXME (erikj): This is a bit of a hack to make the Pdu age
                # keys work
                def json_data_cb():
                    data = transaction.get_dict()
                    now = int(self._clock.time_msec())
                    if "pdus" in data:
                        for p in data["pdus"]:
                            if "age_ts" in p:
                                unsigned = p.setdefault("unsigned", {})
                                unsigned["age"] = now - int(p["age_ts"])
                                del p["age_ts"]
                    return data

try:
|
||||
response = yield self.transport_layer.send_transaction(
|
||||
transaction, json_data_cb
|
||||
)
|
||||
code = 200
|
||||
|
||||
if response:
|
||||
for e_id, r in response.get("pdus", {}).items():
|
||||
if "error" in r:
|
||||
logger.warn(
|
||||
"Transaction returned error for %s: %s",
|
||||
e_id, r,
|
||||
)
|
||||
except HttpResponseException as e:
|
||||
code = e.code
|
||||
response = e.response
|
||||
|
||||
logger.info("TX [%s] got %d response", destination, code)
|
||||
|
||||
logger.debug("TX [%s] Sent transaction", destination)
|
||||
logger.debug("TX [%s] Marking as delivered...", destination)
|
||||
|
||||
yield self.transaction_actions.delivered(
|
||||
transaction, code, response
|
||||
)
|
||||
|
||||
logger.debug("TX [%s] Marked as delivered", destination)
|
||||
|
||||
logger.debug("TX [%s] Yielding to callbacks...", destination)
|
||||
|
||||
for deferred in deferreds:
|
||||
if code == 200:
|
||||
deferred.callback(None)
|
||||
else:
|
||||
deferred.errback(RuntimeError("Got status %d" % code))
|
||||
|
||||
# Ensures we don't continue until all callbacks on that
|
||||
# deferred have fired
|
||||
try:
|
||||
yield deferred
|
||||
except:
|
||||
pass
|
||||
|
||||
logger.debug("TX [%s] Yielded to callbacks", destination)
|
||||
except NotRetryingDestination:
|
||||
logger.info(
|
||||
"TX [%s] not ready for retry yet - "
|
||||
"dropping transaction for now",
|
||||
destination,
|
||||
)
|
||||
except RuntimeError as e:
|
||||
# We capture this here as there as nothing actually listens
|
||||
# for this finishing functions deferred.
|
||||
logger.warn(
|
||||
"TX [%s] Problem in _attempt_transaction: %s",
|
||||
destination,
|
||||
e,
|
||||
)
|
||||
except Exception as e:
|
||||
# We capture this here as there as nothing actually listens
|
||||
# for this finishing functions deferred.
|
||||
logger.warn(
|
||||
"TX [%s] Problem in _attempt_transaction: %s",
|
||||
destination,
|
||||
e,
|
||||
)
|
||||
|
||||
for deferred in deferreds:
|
||||
if not deferred.called:
|
||||
deferred.errback(e)
|
||||
|
||||
finally:
|
||||
# We want to be *very* sure we delete this after we stop processing
|
||||
self.pending_transactions.pop(destination, None)
|
||||
|
||||
# Check to see if there is anything else to send.
|
||||
self._attempt_new_transaction(destination)
|
||||
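The method above relies on one invariant: at most one transaction is in flight per destination, and anything submitted in the meantime accumulates in the pending_*_by_dest maps until the in-flight attempt finishes and re-triggers itself. Below is a minimal, self-contained sketch of that pattern in plain Python, without Twisted or real networking; all names here are illustrative, not Synapse's.

# Illustrative sketch only: models the "one transaction in flight per
# destination" queueing used above.
class ToyTransactionQueue:
    def __init__(self, send_fn):
        self.pending_by_dest = {}   # destination -> list of payloads
        self.in_flight = set()      # destinations with an active attempt
        self.send_fn = send_fn      # callable(destination, list_of_payloads)

    def enqueue(self, destination, payload):
        self.pending_by_dest.setdefault(destination, []).append(payload)
        self._attempt(destination)

    def _attempt(self, destination):
        if destination in self.in_flight:
            # An attempt is already running; it will pick this work up
            # on its next loop iteration.
            return
        self.in_flight.add(destination)
        try:
            while True:
                pending = self.pending_by_dest.pop(destination, [])
                if not pending:
                    break  # nothing left to send
                self.send_fn(destination, pending)
        finally:
            # Mirror the `finally` in the real code: always clear the
            # in-flight marker so the destination can't get stuck.
            self.in_flight.discard(destination)


queue = ToyTransactionQueue(lambda dest, batch: print(dest, batch))
queue.enqueue("remote.example", {"type": "m.test"})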
@@ -1,598 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The transport layer is responsible for both sending transactions to remote
home servers and receiving a variety of requests from other home servers.

Typically, this is done over HTTP (and all home servers are required to
support HTTP), however individual pairings of servers may decide to communicate
over a different (albeit still reliable) protocol.
"""

from twisted.internet import defer

from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError
from synapse.util.logutils import log_function

import logging
import json
import re


logger = logging.getLogger(__name__)


class TransportLayer(object):
    """This is a basic implementation of the transport layer that translates
    transactions and other requests to/from HTTP.

    Attributes:
        server_name (str): Local home server host

        server (synapse.http.server.HttpServer): the http server to
            register listeners on

        client (synapse.http.client.HttpClient): the http client used to
            send requests

        request_handler (TransportRequestHandler): The handler to fire when we
            receive requests for data.

        received_handler (TransportReceivedHandler): The handler to fire when
            we receive data.
    """

    def __init__(self, homeserver, server_name, server, client):
        """
        Args:
            server_name (str): Local home server host
            server (synapse.protocol.http.HttpServer): the http server to
                register listeners on
            client (synapse.protocol.http.HttpClient): the http client used to
                send requests
        """
        self.keyring = homeserver.get_keyring()
        self.server_name = server_name
        self.server = server
        self.client = client
        self.request_handler = None
        self.received_handler = None

    @log_function
    def get_context_state(self, destination, context, event_id=None):
        """ Requests all state for a given context (i.e. room) from the
        given server.

        Args:
            destination (str): The host name of the remote home server we want
                to get the state from.
            context (str): The name of the context we want the state of

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_context_state dest=%s, context=%s",
                     destination, context)

        subpath = "/state/%s/" % context

        args = {}
        if event_id:
            args["event_id"] = event_id

        return self._do_request_for_transaction(
            destination, subpath, args=args
        )

    @log_function
    def get_event(self, destination, event_id):
        """ Requests the pdu with the given id and origin from the given
        server.

        Args:
            destination (str): The host name of the remote home server we want
                to get the state from.
            event_id (str): The id of the event being requested.

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_pdu dest=%s, event_id=%s",
                     destination, event_id)

        subpath = "/event/%s/" % (event_id, )

        return self._do_request_for_transaction(destination, subpath)

    @log_function
    def backfill(self, dest, context, event_tuples, limit):
        """ Requests `limit` previous PDUs in a given context before list of
        PDUs.

        Args:
            dest (str)
            context (str)
            event_tuples (list)
            limit (int)

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug(
            "backfill dest=%s, context=%s, event_tuples=%s, limit=%s",
            dest, context, repr(event_tuples), str(limit)
        )

        if not event_tuples:
            # TODO: raise?
            return

        subpath = "/backfill/%s/" % (context,)

        args = {
            "v": event_tuples,
            "limit": [str(limit)],
        }

        return self._do_request_for_transaction(
            dest,
            subpath,
            args=args,
        )

    @defer.inlineCallbacks
    @log_function
    def send_transaction(self, transaction, json_data_callback=None):
        """ Sends the given Transaction to its destination

        Args:
            transaction (Transaction)

        Returns:
            Deferred: Results of the deferred is a tuple in the form of
            (response_code, response_body) where the response_body is a
            python dict decoded from json
        """
        logger.debug(
            "send_data dest=%s, txid=%s",
            transaction.destination, transaction.transaction_id
        )

        if transaction.destination == self.server_name:
            raise RuntimeError("Transport layer cannot send to itself!")

        # FIXME: This is only used by the tests. The actual json sent is
        # generated by the json_data_callback.
        json_data = transaction.get_dict()

        code, response = yield self.client.put_json(
            transaction.destination,
            path=PREFIX + "/send/%s/" % transaction.transaction_id,
            data=json_data,
            json_data_callback=json_data_callback,
        )

        logger.debug(
            "send_data dest=%s, txid=%s, got response: %d",
            transaction.destination, transaction.transaction_id, code
        )

        defer.returnValue((code, response))

    @defer.inlineCallbacks
    @log_function
    def make_query(self, destination, query_type, args, retry_on_dns_fail):
        path = PREFIX + "/query/%s" % query_type

        response = yield self.client.get_json(
            destination=destination,
            path=path,
            args=args,
            retry_on_dns_fail=retry_on_dns_fail,
        )

        defer.returnValue(response)

    @defer.inlineCallbacks
    @log_function
    def make_join(self, destination, context, user_id, retry_on_dns_fail=True):
        path = PREFIX + "/make_join/%s/%s" % (context, user_id,)

        response = yield self.client.get_json(
            destination=destination,
            path=path,
            retry_on_dns_fail=retry_on_dns_fail,
        )

        defer.returnValue(response)

    @defer.inlineCallbacks
    @log_function
    def send_join(self, destination, context, event_id, content):
        path = PREFIX + "/send_join/%s/%s" % (
            context,
            event_id,
        )

        code, content = yield self.client.put_json(
            destination=destination,
            path=path,
            data=content,
        )

        if not 200 <= code < 300:
            raise RuntimeError("Got %d from send_join" % (code,))

        defer.returnValue(json.loads(content))

    @defer.inlineCallbacks
    @log_function
    def send_invite(self, destination, context, event_id, content):
        path = PREFIX + "/invite/%s/%s" % (
            context,
            event_id,
        )

        code, content = yield self.client.put_json(
            destination=destination,
            path=path,
            data=content,
        )

        if not 200 <= code < 300:
            raise RuntimeError("Got %d from send_invite" % (code,))

        defer.returnValue(json.loads(content))

    @defer.inlineCallbacks
    @log_function
    def get_event_auth(self, destination, context, event_id):
        path = PREFIX + "/event_auth/%s/%s" % (
            context,
            event_id,
        )

        response = yield self.client.get_json(
            destination=destination,
            path=path,
        )

        defer.returnValue(response)

    @defer.inlineCallbacks
    def _authenticate_request(self, request):
        json_request = {
            "method": request.method,
            "uri": request.uri,
            "destination": self.server_name,
            "signatures": {},
        }

        content = None
        origin = None

        if request.method == "PUT":
            # TODO: Handle other method types? other content types?
            try:
                content_bytes = request.content.read()
                content = json.loads(content_bytes)
                json_request["content"] = content
            except:
                raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)

        def parse_auth_header(header_str):
            try:
                params = header_str.split(" ")[1].split(",")
                param_dict = dict(kv.split("=") for kv in params)

                def strip_quotes(value):
                    if value.startswith("\""):
                        return value[1:-1]
                    else:
                        return value

                origin = strip_quotes(param_dict["origin"])
                key = strip_quotes(param_dict["key"])
                sig = strip_quotes(param_dict["sig"])
                return (origin, key, sig)
            except:
                raise SynapseError(
                    400, "Malformed Authorization header", Codes.UNAUTHORIZED
                )

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

        if not auth_headers:
            raise SynapseError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

        for auth in auth_headers:
            if auth.startswith("X-Matrix"):
                (origin, key, sig) = parse_auth_header(auth)
                json_request["origin"] = origin
                json_request["signatures"].setdefault(origin, {})[key] = sig

        if not json_request["signatures"]:
            raise SynapseError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

        yield self.keyring.verify_json_for_server(origin, json_request)

        defer.returnValue((origin, content))

    def _with_authentication(self, handler):
        @defer.inlineCallbacks
        def new_handler(request, *args, **kwargs):
            try:
                (origin, content) = yield self._authenticate_request(request)
                response = yield handler(
                    origin, content, request.args, *args, **kwargs
                )
            except:
                logger.exception("_authenticate_request failed")
                raise
            defer.returnValue(response)
        return new_handler

    @log_function
    def register_received_handler(self, handler):
        """ Register a handler that will be fired when we receive data.

        Args:
            handler (TransportReceivedHandler)
        """
        self.received_handler = handler

        # This is when someone is trying to send us a bunch of data.
        self.server.register_path(
            "PUT",
            re.compile("^" + PREFIX + "/send/([^/]*)/$"),
            self._with_authentication(self._on_send_request)
        )

    @log_function
    def register_request_handler(self, handler):
        """ Register a handler that will be fired when we get asked for data.

        Args:
            handler (TransportRequestHandler)
        """
        self.request_handler = handler

        # TODO(markjh): Namespace the federation URI paths

        # This is for when someone asks us for everything since version X
        self.server.register_path(
            "GET",
            re.compile("^" + PREFIX + "/pull/$"),
            self._with_authentication(
                lambda origin, content, query:
                handler.on_pull_request(query["origin"][0], query["v"])
            )
        )

        # This is when someone asks for a data item for a given server
        # data_id pair.
        self.server.register_path(
            "GET",
            re.compile("^" + PREFIX + "/event/([^/]*)/$"),
            self._with_authentication(
                lambda origin, content, query, event_id:
                handler.on_pdu_request(origin, event_id)
            )
        )

        # This is when someone asks for all data for a given context.
        self.server.register_path(
            "GET",
            re.compile("^" + PREFIX + "/state/([^/]*)/$"),
            self._with_authentication(
                lambda origin, content, query, context:
                handler.on_context_state_request(
                    origin,
                    context,
                    query.get("event_id", [None])[0],
                )
            )
        )

        self.server.register_path(
            "GET",
            re.compile("^" + PREFIX + "/backfill/([^/]*)/$"),
            self._with_authentication(
                lambda origin, content, query, context:
                self._on_backfill_request(
                    origin, context, query["v"], query["limit"]
                )
            )
        )

        # This is when we receive a server-server Query
        self.server.register_path(
            "GET",
            re.compile("^" + PREFIX + "/query/([^/]*)$"),
            self._with_authentication(
                lambda origin, content, query, query_type:
                handler.on_query_request(
                    query_type,
                    {k: v[0].decode("utf-8") for k, v in query.items()}
                )
            )
        )

        self.server.register_path(
            "GET",
            re.compile("^" + PREFIX + "/make_join/([^/]*)/([^/]*)$"),
            self._with_authentication(
                lambda origin, content, query, context, user_id:
                self._on_make_join_request(
                    origin, content, query, context, user_id
                )
            )
        )

        self.server.register_path(
            "GET",
            re.compile("^" + PREFIX + "/event_auth/([^/]*)/([^/]*)$"),
            self._with_authentication(
                lambda origin, content, query, context, event_id:
                handler.on_event_auth(
                    origin, context, event_id,
                )
            )
        )

        self.server.register_path(
            "PUT",
            re.compile("^" + PREFIX + "/send_join/([^/]*)/([^/]*)$"),
            self._with_authentication(
                lambda origin, content, query, context, event_id:
                self._on_send_join_request(
                    origin, content, query,
                )
            )
        )

        self.server.register_path(
            "PUT",
            re.compile("^" + PREFIX + "/invite/([^/]*)/([^/]*)$"),
            self._with_authentication(
                lambda origin, content, query, context, event_id:
                self._on_invite_request(
                    origin, content, query,
                )
            )
        )

    @defer.inlineCallbacks
    @log_function
    def _on_send_request(self, origin, content, query, transaction_id):
        """ Called on PUT /send/<transaction_id>/

        Args:
            request (twisted.web.http.Request): The HTTP request.
            transaction_id (str): The transaction_id associated with this
                request. This is *not* None.

        Returns:
            Deferred: Results in a tuple of `(code, response)`, where
            `response` is a python dict to be converted into JSON that is
            used as the response body.
        """
        # Parse the request
        try:
            transaction_data = content

            logger.debug(
                "Decoded %s: %s",
                transaction_id, str(transaction_data)
            )

            # We should ideally be getting this from the security layer.
            # origin = body["origin"]

            # Add some extra data to the transaction dict that isn't included
            # in the request body.
            transaction_data.update(
                transaction_id=transaction_id,
                destination=self.server_name
            )

        except Exception as e:
            logger.exception(e)
            defer.returnValue((400, {"error": "Invalid transaction"}))
            return

        try:
            handler = self.received_handler
            code, response = yield handler.on_incoming_transaction(
                transaction_data
            )
        except:
            logger.exception("on_incoming_transaction failed")
            raise

        defer.returnValue((code, response))

    @defer.inlineCallbacks
    @log_function
    def _do_request_for_transaction(self, destination, subpath, args={}):
        """
        Args:
            destination (str)
            subpath (str)
            args (dict): This is passed directly to the HttpClient.

        Returns:
            Deferred: Results in a dict.
        """

        data = yield self.client.get_json(
            destination,
            path=PREFIX + subpath,
            args=args,
        )

        # Add certain keys to the JSON, ready for decoding as a Transaction
        data.update(
            origin=destination,
            destination=self.server_name,
            transaction_id=None
        )

        defer.returnValue(data)

    @log_function
    def _on_backfill_request(self, origin, context, v_list, limits):
        if not limits:
            return defer.succeed(
                (400, {"error": "Did not include limit param"})
            )

        limit = int(limits[-1])

        versions = v_list

        return self.request_handler.on_backfill_request(
            origin, context, versions, limit
        )

    @defer.inlineCallbacks
    @log_function
    def _on_make_join_request(self, origin, content, query, context, user_id):
        content = yield self.request_handler.on_make_join_request(
            context, user_id,
        )
        defer.returnValue((200, content))

    @defer.inlineCallbacks
    @log_function
    def _on_send_join_request(self, origin, content, query):
        content = yield self.request_handler.on_send_join_request(
            origin, content,
        )

        defer.returnValue((200, content))

    @defer.inlineCallbacks
    @log_function
    def _on_invite_request(self, origin, content, query):
        content = yield self.request_handler.on_invite_request(
            origin, content,
        )

        defer.returnValue((200, content))
synapse/federation/transport/__init__.py (new file, 74 lines)
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The transport layer is responsible for both sending transactions to remote
home servers and receiving a variety of requests from other home servers.

By default this is done over HTTPS (and all home servers are required to
support HTTPS), however individual pairings of servers may decide to
communicate over a different (albeit still reliable) protocol.
"""

from .server import TransportLayerServer
from .client import TransportLayerClient

from synapse.util.ratelimitutils import FederationRateLimiter


class TransportLayer(TransportLayerServer, TransportLayerClient):
    """This is a basic implementation of the transport layer that translates
    transactions and other requests to/from HTTP.

    Attributes:
        server_name (str): Local home server host

        server (synapse.http.server.HttpServer): the http server to
            register listeners on

        client (synapse.http.client.HttpClient): the http client used to
            send requests

        request_handler (TransportRequestHandler): The handler to fire when we
            receive requests for data.

        received_handler (TransportReceivedHandler): The handler to fire when
            we receive data.
    """

    def __init__(self, homeserver, server_name, server, client):
        """
        Args:
            server_name (str): Local home server host
            server (synapse.protocol.http.HttpServer): the http server to
                register listeners on
            client (synapse.protocol.http.HttpClient): the http client used to
                send requests
        """
        self.keyring = homeserver.get_keyring()
        self.clock = homeserver.get_clock()
        self.server_name = server_name
        self.server = server
        self.client = client
        self.request_handler = None
        self.received_handler = None

        self.ratelimiter = FederationRateLimiter(
            self.clock,
            window_size=homeserver.config.federation_rc_window_size,
            sleep_limit=homeserver.config.federation_rc_sleep_limit,
            sleep_msec=homeserver.config.federation_rc_sleep_delay,
            reject_limit=homeserver.config.federation_rc_reject_limit,
            concurrent_requests=homeserver.config.federation_rc_concurrent,
        )
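Splitting the transport into TransportLayerServer and TransportLayerClient and recombining them via multiple inheritance keeps each half independently testable while letting both share state set up by the combined class. A rough sketch of the same composition idea, with purely illustrative names:

# Illustrative: two independent halves sharing state that only the
# combined class's __init__ defines, mirroring TransportLayer above.
class SenderHalf(object):
    def send(self, destination, data):
        return "%s -> %s: %r" % (self.name, destination, data)

class ReceiverHalf(object):
    def receive(self, origin, data):
        return "%s <- %s: %r" % (self.name, origin, data)

class Transport(SenderHalf, ReceiverHalf):
    def __init__(self, name):
        # Both halves read self.name; neither defines it themselves.
        self.name = name

t = Transport("hs1.example")
print(t.send("hs2.example", {"edu": 1}))
print(t.receive("hs2.example", {"pdu": 2}))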
synapse/federation/transport/client.py (new file, 240 lines)
@@ -0,0 +1,240 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.util.logutils import log_function

import logging


logger = logging.getLogger(__name__)


class TransportLayerClient(object):
    """Sends federation HTTP requests to other servers"""

    @log_function
    def get_room_state(self, destination, room_id, event_id):
        """ Requests all state for a given room from the given server at the
        given event.

        Args:
            destination (str): The host name of the remote home server we want
                to get the state from.
            room_id (str): The room we want the state of
            event_id (str): The event we want the state at.

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_room_state dest=%s, room=%s",
                     destination, room_id)

        path = PREFIX + "/state/%s/" % room_id
        return self.client.get_json(
            destination, path=path, args={"event_id": event_id},
        )

    @log_function
    def get_event(self, destination, event_id):
        """ Requests the pdu with the given id and origin from the given
        server.

        Args:
            destination (str): The host name of the remote home server we want
                to get the state from.
            event_id (str): The id of the event being requested.

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_pdu dest=%s, event_id=%s",
                     destination, event_id)

        path = PREFIX + "/event/%s/" % (event_id, )
        return self.client.get_json(destination, path=path)

    @log_function
    def backfill(self, destination, room_id, event_tuples, limit):
        """ Requests `limit` previous PDUs in a given context before list of
        PDUs.

        Args:
            destination (str)
            room_id (str)
            event_tuples (list)
            limit (int)

        Returns:
            Deferred: Results in a dict received from the remote homeserver.
        """
        logger.debug(
            "backfill dest=%s, room_id=%s, event_tuples=%s, limit=%s",
            destination, room_id, repr(event_tuples), str(limit)
        )

        if not event_tuples:
            # TODO: raise?
            return

        path = PREFIX + "/backfill/%s/" % (room_id,)

        args = {
            "v": event_tuples,
            "limit": [str(limit)],
        }

        return self.client.get_json(
            destination,
            path=path,
            args=args,
        )

    @defer.inlineCallbacks
    @log_function
    def send_transaction(self, transaction, json_data_callback=None):
        """ Sends the given Transaction to its destination

        Args:
            transaction (Transaction)

        Returns:
            Deferred: Results of the deferred is a tuple in the form of
            (response_code, response_body) where the response_body is a
            python dict decoded from json
        """
        logger.debug(
            "send_data dest=%s, txid=%s",
            transaction.destination, transaction.transaction_id
        )

        if transaction.destination == self.server_name:
            raise RuntimeError("Transport layer cannot send to itself!")

        # FIXME: This is only used by the tests. The actual json sent is
        # generated by the json_data_callback.
        json_data = transaction.get_dict()

        response = yield self.client.put_json(
            transaction.destination,
            path=PREFIX + "/send/%s/" % transaction.transaction_id,
            data=json_data,
            json_data_callback=json_data_callback,
        )

        logger.debug(
            "send_data dest=%s, txid=%s, got response: 200",
            transaction.destination, transaction.transaction_id,
        )

        defer.returnValue(response)

    @defer.inlineCallbacks
    @log_function
    def make_query(self, destination, query_type, args, retry_on_dns_fail):
        path = PREFIX + "/query/%s" % query_type

        content = yield self.client.get_json(
            destination=destination,
            path=path,
            args=args,
            retry_on_dns_fail=retry_on_dns_fail,
        )

        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def make_join(self, destination, room_id, user_id, retry_on_dns_fail=True):
        path = PREFIX + "/make_join/%s/%s" % (room_id, user_id)

        content = yield self.client.get_json(
            destination=destination,
            path=path,
            retry_on_dns_fail=retry_on_dns_fail,
        )

        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def send_join(self, destination, room_id, event_id, content):
        path = PREFIX + "/send_join/%s/%s" % (room_id, event_id)

        response = yield self.client.put_json(
            destination=destination,
            path=path,
            data=content,
        )

        defer.returnValue(response)

    @defer.inlineCallbacks
    @log_function
    def send_invite(self, destination, room_id, event_id, content):
        path = PREFIX + "/invite/%s/%s" % (room_id, event_id)

        response = yield self.client.put_json(
            destination=destination,
            path=path,
            data=content,
        )

        defer.returnValue(response)

    @defer.inlineCallbacks
    @log_function
    def get_event_auth(self, destination, room_id, event_id):
        path = PREFIX + "/event_auth/%s/%s" % (room_id, event_id)

        content = yield self.client.get_json(
            destination=destination,
            path=path,
        )

        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def send_query_auth(self, destination, room_id, event_id, content):
        path = PREFIX + "/query_auth/%s/%s" % (room_id, event_id)

        content = yield self.client.post_json(
            destination=destination,
            path=path,
            data=content,
        )

        defer.returnValue(content)

    @defer.inlineCallbacks
    @log_function
    def get_missing_events(self, destination, room_id, earliest_events,
                           latest_events, limit, min_depth):
        path = PREFIX + "/get_missing_events/%s" % (room_id,)

        content = yield self.client.post_json(
            destination=destination,
            path=path,
            data={
                "limit": int(limit),
                "min_depth": int(min_depth),
                "earliest_events": earliest_events,
                "latest_events": latest_events,
            }
        )

        defer.returnValue(content)
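Each client method above is a thin wrapper that interpolates identifiers into a path under the federation prefix and delegates to a shared HTTP client. A hedged sketch of that URL construction follows; the prefix value is an assumption for illustration, and the quoting here is an extra precaution this sketch adds, not something the code above does.

# Illustrative only: shows how paths like /make_join/<room>/<user> are built.
try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote        # Python 2

PREFIX = "/_matrix/federation/v1"   # assumed value of FEDERATION_PREFIX

def make_join_path(room_id, user_id):
    # Percent-encode both identifiers so "/" or "?" in them can't
    # escape their path segment.
    return PREFIX + "/make_join/%s/%s" % (
        quote(room_id, safe=""), quote(user_id, safe="")
    )

print(make_join_path("!room:hs2.example", "@alice:hs1.example"))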
synapse/federation/transport/server.py (new file, 365 lines)
@@ -0,0 +1,365 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError
from synapse.util.logutils import log_function

import functools
import logging
import simplejson as json
import re


logger = logging.getLogger(__name__)


class TransportLayerServer(object):
    """Handles incoming federation HTTP requests"""

    # A method just so we can pass 'self' as the authenticator to the Servlets
    @defer.inlineCallbacks
    def authenticate_request(self, request):
        json_request = {
            "method": request.method,
            "uri": request.uri,
            "destination": self.server_name,
            "signatures": {},
        }

        content = None
        origin = None

        if request.method in ["PUT", "POST"]:
            # TODO: Handle other method types? other content types?
            try:
                content_bytes = request.content.read()
                content = json.loads(content_bytes)
                json_request["content"] = content
            except:
                raise SynapseError(400, "Unable to parse JSON", Codes.BAD_JSON)

        def parse_auth_header(header_str):
            try:
                params = header_str.split(" ")[1].split(",")
                param_dict = dict(kv.split("=") for kv in params)

                def strip_quotes(value):
                    if value.startswith("\""):
                        return value[1:-1]
                    else:
                        return value

                origin = strip_quotes(param_dict["origin"])
                key = strip_quotes(param_dict["key"])
                sig = strip_quotes(param_dict["sig"])
                return (origin, key, sig)
            except:
                raise SynapseError(
                    400, "Malformed Authorization header", Codes.UNAUTHORIZED
                )

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

        if not auth_headers:
            raise SynapseError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

        for auth in auth_headers:
            if auth.startswith("X-Matrix"):
                (origin, key, sig) = parse_auth_header(auth)
                json_request["origin"] = origin
                json_request["signatures"].setdefault(origin, {})[key] = sig

        if not json_request["signatures"]:
            raise SynapseError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,
            )

        yield self.keyring.verify_json_for_server(origin, json_request)

        defer.returnValue((origin, content))

    @log_function
    def register_received_handler(self, handler):
        """ Register a handler that will be fired when we receive data.

        Args:
            handler (TransportReceivedHandler)
        """
        FederationSendServlet(
            handler,
            authenticator=self,
            ratelimiter=self.ratelimiter,
            server_name=self.server_name,
        ).register(self.server)

    @log_function
    def register_request_handler(self, handler):
        """ Register a handler that will be fired when we get asked for data.

        Args:
            handler (TransportRequestHandler)
        """
        for servletclass in SERVLET_CLASSES:
            servletclass(
                handler,
                authenticator=self,
                ratelimiter=self.ratelimiter,
            ).register(self.server)


class BaseFederationServlet(object):
    def __init__(self, handler, authenticator, ratelimiter):
        self.handler = handler
        self.authenticator = authenticator
        self.ratelimiter = ratelimiter

    def _wrap(self, code):
        authenticator = self.authenticator
        ratelimiter = self.ratelimiter

        @defer.inlineCallbacks
        @functools.wraps(code)
        def new_code(request, *args, **kwargs):
            try:
                (origin, content) = yield authenticator.authenticate_request(request)
                with ratelimiter.ratelimit(origin) as d:
                    yield d
                    response = yield code(
                        origin, content, request.args, *args, **kwargs
                    )
            except:
                logger.exception("authenticate_request failed")
                raise
            defer.returnValue(response)

        # Extra logic that functools.wraps() doesn't finish
        new_code.__self__ = code.__self__

        return new_code

    def register(self, server):
        pattern = re.compile("^" + PREFIX + self.PATH + "$")

        for method in ("GET", "PUT", "POST"):
            code = getattr(self, "on_%s" % (method), None)
            if code is None:
                continue

            server.register_path(method, pattern, self._wrap(code))


class FederationSendServlet(BaseFederationServlet):
    PATH = "/send/([^/]*)/"

    def __init__(self, handler, server_name, **kwargs):
        super(FederationSendServlet, self).__init__(handler, **kwargs)
        self.server_name = server_name

    # This is when someone is trying to send us a bunch of data.
    @defer.inlineCallbacks
    def on_PUT(self, origin, content, query, transaction_id):
        """ Called on PUT /send/<transaction_id>/

        Args:
            request (twisted.web.http.Request): The HTTP request.
            transaction_id (str): The transaction_id associated with this
                request. This is *not* None.

        Returns:
            Deferred: Results in a tuple of `(code, response)`, where
            `response` is a python dict to be converted into JSON that is
            used as the response body.
        """
        # Parse the request
        try:
            transaction_data = content

            logger.debug(
                "Decoded %s: %s",
                transaction_id, str(transaction_data)
            )

            # We should ideally be getting this from the security layer.
            # origin = body["origin"]

            # Add some extra data to the transaction dict that isn't included
            # in the request body.
            transaction_data.update(
                transaction_id=transaction_id,
                destination=self.server_name
            )

        except Exception as e:
            logger.exception(e)
            defer.returnValue((400, {"error": "Invalid transaction"}))
            return

        try:
            code, response = yield self.handler.on_incoming_transaction(
                transaction_data
            )
        except:
            logger.exception("on_incoming_transaction failed")
            raise

        defer.returnValue((code, response))


class FederationPullServlet(BaseFederationServlet):
    PATH = "/pull/"

    # This is for when someone asks us for everything since version X
    def on_GET(self, origin, content, query):
        return self.handler.on_pull_request(query["origin"][0], query["v"])


class FederationEventServlet(BaseFederationServlet):
    PATH = "/event/([^/]*)/"

    # This is when someone asks for a data item for a given server data_id pair.
    def on_GET(self, origin, content, query, event_id):
        return self.handler.on_pdu_request(origin, event_id)


class FederationStateServlet(BaseFederationServlet):
    PATH = "/state/([^/]*)/"

    # This is when someone asks for all data for a given context.
    def on_GET(self, origin, content, query, context):
        return self.handler.on_context_state_request(
            origin,
            context,
            query.get("event_id", [None])[0],
        )


class FederationBackfillServlet(BaseFederationServlet):
    PATH = "/backfill/([^/]*)/"

    def on_GET(self, origin, content, query, context):
        versions = query["v"]
        limits = query["limit"]

        if not limits:
            return defer.succeed((400, {"error": "Did not include limit param"}))

        limit = int(limits[-1])

        return self.handler.on_backfill_request(origin, context, versions, limit)


class FederationQueryServlet(BaseFederationServlet):
    PATH = "/query/([^/]*)"

    # This is when we receive a server-server Query
    def on_GET(self, origin, content, query, query_type):
        return self.handler.on_query_request(
            query_type,
            {k: v[0].decode("utf-8") for k, v in query.items()}
        )


class FederationMakeJoinServlet(BaseFederationServlet):
    PATH = "/make_join/([^/]*)/([^/]*)"

    @defer.inlineCallbacks
    def on_GET(self, origin, content, query, context, user_id):
        content = yield self.handler.on_make_join_request(context, user_id)
        defer.returnValue((200, content))


class FederationEventAuthServlet(BaseFederationServlet):
    PATH = "/event_auth/([^/]*)/([^/]*)"

    def on_GET(self, origin, content, query, context, event_id):
        return self.handler.on_event_auth(origin, context, event_id)


class FederationSendJoinServlet(BaseFederationServlet):
    PATH = "/send_join/([^/]*)/([^/]*)"

    @defer.inlineCallbacks
    def on_PUT(self, origin, content, query, context, event_id):
        # TODO(paul): assert that context/event_id parsed from path actually
        #   match those given in content
        content = yield self.handler.on_send_join_request(origin, content)
        defer.returnValue((200, content))


class FederationInviteServlet(BaseFederationServlet):
    PATH = "/invite/([^/]*)/([^/]*)"

    @defer.inlineCallbacks
    def on_PUT(self, origin, content, query, context, event_id):
        # TODO(paul): assert that context/event_id parsed from path actually
        #   match those given in content
        content = yield self.handler.on_invite_request(origin, content)
        defer.returnValue((200, content))


class FederationQueryAuthServlet(BaseFederationServlet):
    PATH = "/query_auth/([^/]*)/([^/]*)"

    @defer.inlineCallbacks
    def on_POST(self, origin, content, query, context, event_id):
        new_content = yield self.handler.on_query_auth_request(
            origin, content, event_id
        )

        defer.returnValue((200, new_content))


class FederationGetMissingEventsServlet(BaseFederationServlet):
    # TODO(paul): Why does this path alone end with "/?" optional?
    PATH = "/get_missing_events/([^/]*)/?"

    @defer.inlineCallbacks
    def on_POST(self, origin, content, query, room_id):
        limit = int(content.get("limit", 10))
        min_depth = int(content.get("min_depth", 0))
        earliest_events = content.get("earliest_events", [])
        latest_events = content.get("latest_events", [])

        content = yield self.handler.on_get_missing_events(
            origin,
            room_id=room_id,
            earliest_events=earliest_events,
            latest_events=latest_events,
            min_depth=min_depth,
            limit=limit,
        )

        defer.returnValue((200, content))


SERVLET_CLASSES = (
    FederationPullServlet,
    FederationEventServlet,
    FederationStateServlet,
    FederationBackfillServlet,
    FederationQueryServlet,
    FederationMakeJoinServlet,
    FederationSendJoinServlet,
    FederationInviteServlet,
    FederationQueryAuthServlet,
    FederationGetMissingEventsServlet,
    FederationEventAuthServlet,
)
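BaseFederationServlet turns each subclass into a registration recipe: the PATH regex names the URL and its capture groups, and register() probes for on_GET/on_PUT/on_POST handlers. A standalone sketch of that dispatch idea, without Twisted and with a hypothetical server object:

import re

# Illustrative re-creation of the PATH + on_METHOD dispatch used above.
class ToyServer(object):
    def __init__(self):
        self.routes = []  # (method, compiled_pattern, callback)

    def register_path(self, method, pattern, callback):
        self.routes.append((method, pattern, callback))

    def handle(self, method, path):
        for m, pattern, callback in self.routes:
            match = pattern.match(path)
            if m == method and match:
                # Regex capture groups become positional handler arguments.
                return callback(*match.groups())
        return (404, {"error": "Not found"})

class ToyServlet(object):
    PATH = "/event/([^/]*)/"

    def on_GET(self, event_id):
        return (200, {"event_id": event_id})

    def register(self, server):
        pattern = re.compile("^" + self.PATH + "$")
        for method in ("GET", "PUT", "POST"):
            code = getattr(self, "on_%s" % (method,), None)
            if code is None:
                continue  # only register the methods the servlet defines
            server.register_path(method, pattern, code)

server = ToyServer()
ToyServlet().register(server)
print(server.handle("GET", "/event/$abc123/"))  # (200, {'event_id': '$abc123'})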
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from synapse.appservice.api import ApplicationServiceApi
 from .register import RegistrationHandler
 from .room import (
     RoomCreationHandler, RoomMemberHandler, RoomListHandler
@@ -26,6 +27,8 @@ from .presence import PresenceHandler
 from .directory import DirectoryHandler
 from .typing import TypingNotificationHandler
 from .admin import AdminHandler
+from .appservice import ApplicationServicesHandler
+from .sync import SyncHandler
 
 
 class Handlers(object):
@@ -51,3 +54,7 @@ class Handlers(object):
         self.directory_handler = DirectoryHandler(hs)
         self.typing_notification_handler = TypingNotificationHandler(hs)
         self.admin_handler = AdminHandler(hs)
+        self.appservice_handler = ApplicationServicesHandler(
+            hs, ApplicationServiceApi(hs)
+        )
+        self.sync_handler = SyncHandler(hs)
@@ -19,6 +19,7 @@ from synapse.api.errors import LimitExceededError, SynapseError
 from synapse.util.async import run_on_reactor
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.api.constants import Membership, EventTypes
+from synapse.types import UserID
 
 import logging
 
@@ -89,8 +90,8 @@ class BaseHandler(object):
         event = builder.build()
 
         logger.debug(
-            "Created event %s with auth_events: %s, current state: %s",
-            event.event_id, context.auth_events, context.current_state,
+            "Created event %s with current state: %s",
+            event.event_id, context.current_state,
         )
 
         defer.returnValue(
@@ -105,7 +106,7 @@ class BaseHandler(object):
         # We now need to go and hit out to wherever we need to hit out to.
 
         if not suppress_auth:
-            self.auth.check(event, auth_events=context.auth_events)
+            self.auth.check(event, auth_events=context.current_state)
 
         yield self.store.persist_event(event, context=context)
 
@@ -113,7 +114,7 @@ class BaseHandler(object):
 
         if event.type == EventTypes.Member:
             if event.content["membership"] == Membership.INVITE:
-                invitee = self.hs.parse_userid(event.state_key)
+                invitee = UserID.from_string(event.state_key)
                 if not self.hs.is_mine(invitee):
                     # TODO: Can we add signature from remote server in a nicer
                     # way? If we have been invited by a remote server, we need
@@ -134,17 +135,24 @@ class BaseHandler(object):
                     if k[0] == EventTypes.Member:
                         if s.content["membership"] == Membership.JOIN:
                             destinations.add(
-                                self.hs.parse_userid(s.state_key).domain
+                                UserID.from_string(s.state_key).domain
                             )
                 except SynapseError:
                     logger.warn(
                         "Failed to get destination from event %s", s.event_id
                     )
 
-        yield self.notifier.on_new_room_event(event, extra_users=extra_users)
+        # Don't block waiting on waking up all the listeners.
+        d = self.notifier.on_new_room_event(event, extra_users=extra_users)
+
+        def log_failure(f):
+            logger.warn(
+                "Failed to notify about %s: %s",
+                event.event_id, f.value
+            )
+
+        d.addErrback(log_failure)
 
         yield federation_handler.handle_new_event(
-            event,
-            None,
-            destinations=destinations,
+            event, destinations=destinations,
        )
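The _base.py change above swaps a blocking `yield` on the notifier for a fire-and-forget Deferred whose failures are only logged, so a slow or broken listener can't stall event persistence. A minimal Twisted sketch of that pattern; the notifier stand-in here is illustrative, not Synapse's:

from twisted.internet import defer

def notify_listeners():
    # Stand-in for notifier.on_new_room_event: returns a Deferred that
    # may fail long after the caller has moved on.
    return defer.fail(RuntimeError("a listener blew up"))

def log_failure(f):
    # The errback consumes the failure after logging it, so the error
    # neither propagates nor goes unhandled.
    print("Failed to notify: %s" % (f.value,))

# Fire and forget: attach the errback, do NOT yield on the Deferred.
d = notify_listeners()
d.addErrback(log_failure)
print("event persisted without waiting on listeners")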
synapse/handlers/appservice.py (new file, 211 lines)
@@ -0,0 +1,211 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.appservice import ApplicationService
from synapse.types import UserID
import synapse.util.stringutils as stringutils

import logging

logger = logging.getLogger(__name__)


# NB: Purposefully not inheriting BaseHandler since that contains way too much
# setup code which this handler does not need or use. This makes testing a lot
# easier.
class ApplicationServicesHandler(object):

    def __init__(self, hs, appservice_api):
        self.store = hs.get_datastore()
        self.hs = hs
        self.appservice_api = appservice_api

    @defer.inlineCallbacks
    def register(self, app_service):
        logger.info("Register -> %s", app_service)
        # check the token is recognised
        try:
            stored_service = yield self.store.get_app_service_by_token(
                app_service.token
            )
            if not stored_service:
                raise StoreError(404, "Application service not found")
        except StoreError:
            raise SynapseError(
                403, "Unrecognised application services token. "
                "Consult the home server admin.",
                errcode=Codes.FORBIDDEN
            )

        app_service.hs_token = self._generate_hs_token()

        # create a sender for this application service which is used when
        # creating rooms, etc..
        account = yield self.hs.get_handlers().registration_handler.register()
        app_service.sender = account[0]

        yield self.store.update_app_service(app_service)
        defer.returnValue(app_service)

    @defer.inlineCallbacks
    def unregister(self, token):
        logger.info("Unregister as_token=%s", token)
        yield self.store.unregister_app_service(token)

    @defer.inlineCallbacks
    def notify_interested_services(self, event):
        """Notifies (pushes) all application services interested in this event.

        Pushing is done asynchronously, so this method won't block for any
        prolonged length of time.

        Args:
            event(Event): The event to push out to interested services.
        """
        # Gather interested services
        services = yield self._get_services_for_event(event)
        if len(services) == 0:
            return  # no services need notifying

        # Do we know this user exists? If not, poke the user query API for
        # all services which match that user regex. This needs to block as these
        # user queries need to be made BEFORE pushing the event.
        yield self._check_user_exists(event.sender)
        if event.type == EventTypes.Member:
            yield self._check_user_exists(event.state_key)

        # Fork off pushes to these services - XXX First cut, best effort
        for service in services:
            self.appservice_api.push(service, event)

    @defer.inlineCallbacks
    def query_user_exists(self, user_id):
        """Check if any application service knows this user_id exists.

        Args:
            user_id(str): The user to query if they exist on any AS.
        Returns:
            True if this user exists on at least one application service.
        """
        user_query_services = yield self._get_services_for_user(
            user_id=user_id
        )
        for user_service in user_query_services:
            is_known_user = yield self.appservice_api.query_user(
                user_service, user_id
            )
            if is_known_user:
                defer.returnValue(True)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def query_room_alias_exists(self, room_alias):
        """Check if an application service knows this room alias exists.

        Args:
            room_alias(RoomAlias): The room alias to query.
        Returns:
            namedtuple: with keys "room_id" and "servers" or None if no
            association can be found.
        """
        room_alias_str = room_alias.to_string()
        alias_query_services = yield self._get_services_for_event(
            event=None,
            restrict_to=ApplicationService.NS_ALIASES,
            alias_list=[room_alias_str]
        )
        for alias_service in alias_query_services:
            is_known_alias = yield self.appservice_api.query_alias(
                alias_service, room_alias_str
            )
            if is_known_alias:
                # the alias exists now so don't query more ASes.
                result = yield self.store.get_association_from_room_alias(
                    room_alias
                )
                defer.returnValue(result)

    @defer.inlineCallbacks
    def _get_services_for_event(self, event, restrict_to="", alias_list=None):
        """Retrieve a list of application services interested in this event.

        Args:
            event(Event): The event to check. Can be None if alias_list is not.
            restrict_to(str): The namespace to restrict regex tests to.
            alias_list: A list of aliases to get services for. If None, this
                list is obtained from the database.
        Returns:
            list<ApplicationService>: A list of services interested in this
            event based on the service regex.
        """
        member_list = None
        if hasattr(event, "room_id"):
            # We need to know the aliases associated with this event.room_id,
            # if any.
            if not alias_list:
                alias_list = yield self.store.get_aliases_for_room(
                    event.room_id
                )
            # We need to know the members associated with this event.room_id,
            # if any.
            member_list = yield self.store.get_room_members(
                room_id=event.room_id,
                membership=Membership.JOIN
            )

        services = yield self.store.get_app_services()
        interested_list = [
            s for s in services if (
                s.is_interested(event, restrict_to, alias_list, member_list)
            )
        ]
        defer.returnValue(interested_list)

    @defer.inlineCallbacks
    def _get_services_for_user(self, user_id):
        services = yield self.store.get_app_services()
        interested_list = [
            s for s in services if (
                s.is_interested_in_user(user_id)
            )
        ]
        defer.returnValue(interested_list)

    @defer.inlineCallbacks
    def _is_unknown_user(self, user_id):
        user = UserID.from_string(user_id)
        if not self.hs.is_mine(user):
            # we don't know if they are unknown or not since it isn't one of our
            # users. We can't poke ASes.
            defer.returnValue(False)
            return

        user_info = yield self.store.get_user_by_id(user_id)
        defer.returnValue(len(user_info) == 0)

    @defer.inlineCallbacks
    def _check_user_exists(self, user_id):
        unknown_user = yield self._is_unknown_user(user_id)
        if unknown_user:
            exists = yield self.query_user_exists(user_id)
            defer.returnValue(exists)
        defer.returnValue(True)

    def _generate_hs_token(self):
        return stringutils.random_string(24)
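The handler above fans events out only to "interested" application services, where interest is defined by per-namespace regexes (users, aliases, rooms). A rough sketch of such a check; the namespace layout here is an assumption for illustration, not the exact ApplicationService schema:

import re

# Illustrative interest check modelled on ApplicationService.is_interested*.
# The regex namespace layout is assumed for the example.
class ToyAppService(object):
    def __init__(self, user_regexes, alias_regexes):
        self.namespaces = {
            "users": [re.compile(r) for r in user_regexes],
            "aliases": [re.compile(r) for r in alias_regexes],
        }

    def is_interested_in_user(self, user_id):
        return any(p.match(user_id) for p in self.namespaces["users"])

    def is_interested_in_alias(self, alias):
        return any(p.match(alias) for p in self.namespaces["aliases"])

bridge = ToyAppService(
    user_regexes=[r"@irc_.*:hs1\.example"],
    alias_regexes=[r"#irc_.*:hs1\.example"],
)
print(bridge.is_interested_in_user("@irc_alice:hs1.example"))  # True
print(bridge.is_interested_in_user("@bob:hs1.example"))        # False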
@@ -19,6 +19,7 @@ from ._base import BaseHandler

from synapse.api.errors import SynapseError, Codes, CodeMessageException
from synapse.api.constants import EventTypes
from synapse.types import RoomAlias

import logging

@@ -36,18 +37,15 @@ class DirectoryHandler(BaseHandler):
        )

    @defer.inlineCallbacks
    def create_association(self, user_id, room_alias, room_id, servers=None):

        # TODO(erikj): Do auth.
    def _create_association(self, room_alias, room_id, servers=None):
        # general association creation for both human users and app services

        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room alias must be local")
            # TODO(erikj): Change this.

        # TODO(erikj): Add transactions.

        # TODO(erikj): Check if there is a current association.

        if not servers:
            servers = yield self.store.get_joined_hosts_for_room(room_id)

@@ -60,23 +58,78 @@ class DirectoryHandler(BaseHandler):
            servers
        )

    @defer.inlineCallbacks
    def create_association(self, user_id, room_alias, room_id, servers=None):
        # association creation for human users
        # TODO(erikj): Do user auth.

        can_create = yield self.can_modify_alias(
            room_alias,
            user_id=user_id
        )
        if not can_create:
            raise SynapseError(
                400, "This alias is reserved by an application service.",
                errcode=Codes.EXCLUSIVE
            )
        yield self._create_association(room_alias, room_id, servers)

    @defer.inlineCallbacks
    def create_appservice_association(self, service, room_alias, room_id,
                                      servers=None):
        if not service.is_interested_in_alias(room_alias.to_string()):
            raise SynapseError(
                400, "This application service has not reserved"
                " this kind of alias.", errcode=Codes.EXCLUSIVE
            )

        # association creation for app services
        yield self._create_association(room_alias, room_id, servers)

    @defer.inlineCallbacks
    def delete_association(self, user_id, room_alias):
        # association deletion for human users

        # TODO Check if server admin

        can_delete = yield self.can_modify_alias(
            room_alias,
            user_id=user_id
        )
        if not can_delete:
            raise SynapseError(
                400, "This alias is reserved by an application service.",
                errcode=Codes.EXCLUSIVE
            )

        yield self._delete_association(room_alias)

    @defer.inlineCallbacks
    def delete_appservice_association(self, service, room_alias):
        if not service.is_interested_in_alias(room_alias.to_string()):
            raise SynapseError(
                400,
                "This application service has not reserved this kind of alias",
                errcode=Codes.EXCLUSIVE
            )
        yield self._delete_association(room_alias)

    @defer.inlineCallbacks
    def _delete_association(self, room_alias):
        if not self.hs.is_mine(room_alias):
            raise SynapseError(400, "Room alias must be local")

        room_id = yield self.store.delete_room_alias(room_alias)
        yield self.store.delete_room_alias(room_alias)

        if room_id:
            yield self._update_room_alias_events(user_id, room_id)
        # TODO - Looks like _update_room_alias_event has never been implemented
        # if room_id:
        #     yield self._update_room_alias_events(user_id, room_id)

    @defer.inlineCallbacks
    def get_association(self, room_alias):
        room_id = None
        if self.hs.is_mine(room_alias):
            result = yield self.store.get_association_from_room_alias(
            result = yield self.get_association_from_room_alias(
                room_alias
            )

@@ -107,12 +160,21 @@ class DirectoryHandler(BaseHandler):
        if not room_id:
            raise SynapseError(
                404,
                "Room alias %r not found" % (room_alias.to_string(),),
                "Room alias %s not found" % (room_alias.to_string(),),
                Codes.NOT_FOUND
            )

        extra_servers = yield self.store.get_joined_hosts_for_room(room_id)
        servers = list(set(extra_servers) | set(servers))
        servers = set(extra_servers) | set(servers)

        # If this server is in the list of servers, return it first.
        if self.server_name in servers:
            servers = (
                [self.server_name]
                + [s for s in servers if s != self.server_name]
            )
        else:
            servers = list(servers)

        defer.returnValue({
            "room_id": room_id,

@@ -122,13 +184,13 @@ class DirectoryHandler(BaseHandler):

    @defer.inlineCallbacks
    def on_directory_query(self, args):
        room_alias = self.hs.parse_roomalias(args["room_alias"])
        room_alias = RoomAlias.from_string(args["room_alias"])
        if not self.hs.is_mine(room_alias):
            raise SynapseError(
                400, "Room Alias is not hosted on this Home Server"
            )

        result = yield self.store.get_association_from_room_alias(
        result = yield self.get_association_from_room_alias(
            room_alias
        )

@@ -156,3 +218,37 @@ class DirectoryHandler(BaseHandler):
            "sender": user_id,
            "content": {"aliases": aliases},
        }, ratelimit=False)

    @defer.inlineCallbacks
    def get_association_from_room_alias(self, room_alias):
        result = yield self.store.get_association_from_room_alias(
            room_alias
        )
        if not result:
            # Query AS to see if it exists
            as_handler = self.hs.get_handlers().appservice_handler
            result = yield as_handler.query_room_alias_exists(room_alias)
        defer.returnValue(result)

    @defer.inlineCallbacks
    def can_modify_alias(self, alias, user_id=None):
        # Any application service "interested" in an alias they are regexing on
        # can modify the alias.
        # Users can only modify the alias if ALL the interested services have
        # non-exclusive locks on the alias (or there are no interested services)
        services = yield self.store.get_app_services()
        interested_services = [
            s for s in services if s.is_interested_in_alias(alias.to_string())
        ]

        for service in interested_services:
            if user_id == service.sender:
                # this user IS the app service so they can do whatever they like
                defer.returnValue(True)
                return
            elif service.is_exclusive_alias(alias.to_string()):
                # another service has an exclusive lock on this alias.
                defer.returnValue(False)
                return
        # either no interested services, or no service with an exclusive lock
        defer.returnValue(True)
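
The exclusivity rules spelled out in `can_modify_alias` reduce to a small decision: the service's own sender user may always act, any other service holding an exclusive claim on the alias blocks the change, and otherwise it is allowed. A standalone sketch of just that rule (the `(sender, exclusive, pattern)` tuple shape is assumed for illustration):

```python
import re

def can_modify_alias(alias, user_id, services):
    """services: iterable of (sender, exclusive, pattern) tuples (assumed shape).

    Mirrors the handler's logic: the AS user itself may always modify; any
    other service with an exclusive claim on the alias blocks the change.
    """
    interested = [s for s in services if re.match(s[2], alias)]
    for sender, exclusive, _ in interested:
        if user_id == sender:
            return True   # this user IS the app service
        if exclusive:
            return False  # another service holds an exclusive lock
    return True           # no interested services, or none exclusive

services = [("@irc_bridge:example.org", True, r"#irc_.*")]
assert can_modify_alias("#irc_foo:example.org", "@irc_bridge:example.org", services)
assert not can_modify_alias("#irc_foo:example.org", "@alice:example.org", services)
assert can_modify_alias("#general:example.org", "@alice:example.org", services)
```
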
@@ -17,10 +17,13 @@ from twisted.internet import defer

from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.types import UserID
from synapse.events.utils import serialize_event

from ._base import BaseHandler

import logging
import random


logger = logging.getLogger(__name__)

@@ -46,39 +49,48 @@ class EventStreamHandler(BaseHandler):

    @defer.inlineCallbacks
    @log_function
    def get_stream(self, auth_user_id, pagin_config, timeout=0):
        auth_user = self.hs.parse_userid(auth_user_id)
    def get_stream(self, auth_user_id, pagin_config, timeout=0,
                   as_client_event=True, affect_presence=True):
        auth_user = UserID.from_string(auth_user_id)

        try:
            if auth_user not in self._streams_per_user:
                self._streams_per_user[auth_user] = 0
                if auth_user in self._stop_timer_per_user:
                    try:
                        self.clock.cancel_call_later(
                            self._stop_timer_per_user.pop(auth_user)
            if affect_presence:
                if auth_user not in self._streams_per_user:
                    self._streams_per_user[auth_user] = 0
                    if auth_user in self._stop_timer_per_user:
                        try:
                            self.clock.cancel_call_later(
                                self._stop_timer_per_user.pop(auth_user)
                            )
                        except:
                            logger.exception("Failed to cancel event timer")
                    else:
                        yield self.distributor.fire(
                            "started_user_eventstream", auth_user
                        )
                    except:
                        logger.exception("Failed to cancel event timer")
                else:
                    yield self.distributor.fire(
                        "started_user_eventstream", auth_user
                    )
            self._streams_per_user[auth_user] += 1

            if pagin_config.from_token is None:
                pagin_config.from_token = None
                self._streams_per_user[auth_user] += 1

            rm_handler = self.hs.get_handlers().room_member_handler
            logger.debug("BETA")
            room_ids = yield rm_handler.get_rooms_for_user(auth_user)
            room_ids = yield rm_handler.get_joined_rooms_for_user(auth_user)

            if timeout:
                # If they've set a timeout set a minimum limit.
                timeout = max(timeout, 500)

                # Add some randomness to this value to try and mitigate against
                # thundering herds on restart.
                timeout = random.randint(int(timeout*0.9), int(timeout*1.1))

            logger.debug("ALPHA")
            with PreserveLoggingContext():
                events, tokens = yield self.notifier.get_events_for(
                    auth_user, room_ids, pagin_config, timeout
                )

            chunks = [self.hs.serialize_event(e) for e in events]
            time_now = self.clock.time_msec()

            chunks = [
                serialize_event(e, time_now, as_client_event) for e in events
            ]

            chunk = {
                "chunk": chunks,

@@ -89,28 +101,29 @@ class EventStreamHandler(BaseHandler):
            defer.returnValue(chunk)

        finally:
            self._streams_per_user[auth_user] -= 1
            if not self._streams_per_user[auth_user]:
                del self._streams_per_user[auth_user]
            if affect_presence:
                self._streams_per_user[auth_user] -= 1
                if not self._streams_per_user[auth_user]:
                    del self._streams_per_user[auth_user]

            # 10 seconds of grace to allow the client to reconnect again
            # before we think they're gone
            def _later():
                logger.debug(
                    "_later stopped_user_eventstream %s", auth_user
                # 10 seconds of grace to allow the client to reconnect again
                # before we think they're gone
                def _later():
                    logger.debug(
                        "_later stopped_user_eventstream %s", auth_user
                    )

                    self._stop_timer_per_user.pop(auth_user, None)

                    return self.distributor.fire(
                        "stopped_user_eventstream", auth_user
                    )

                logger.debug("Scheduling _later: for %s", auth_user)
                self._stop_timer_per_user[auth_user] = (
                    self.clock.call_later(30, _later)
                )

            self._stop_timer_per_user.pop(auth_user, None)

            yield self.distributor.fire(
                "stopped_user_eventstream", auth_user
            )

            logger.debug("Scheduling _later: for %s", auth_user)
            self._stop_timer_per_user[auth_user] = (
                self.clock.call_later(30, _later)
            )


class EventHandler(BaseHandler):

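
The timeout handling in `get_stream` above first clamps the client's long-poll timeout to a 500ms floor and then jitters it by roughly ±10%, so clients that all reconnect after a restart do not expire in lockstep. The calculation in isolation:

```python
import random

def jittered_timeout(timeout_ms):
    """Clamp to a 500ms floor, then spread by +/-10% to avoid a
    thundering herd of simultaneous long-poll expiries."""
    if not timeout_ms:
        return 0
    timeout_ms = max(timeout_ms, 500)
    return random.randint(int(timeout_ms * 0.9), int(timeout_ms * 1.1))

print(jittered_timeout(30000))  # e.g. somewhere in [27000, 33000]
```
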
@@ -17,21 +17,21 @@

from ._base import BaseHandler

from synapse.events.utils import prune_event
from synapse.api.errors import (
    AuthError, FederationError, SynapseError, StoreError,
    AuthError, FederationError, StoreError,
)
from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor
from synapse.util.frozenutils import unfreeze
from synapse.crypto.event_signing import (
    compute_event_signature, check_event_content_hash,
    add_hashes_and_signatures,
    compute_event_signature, add_hashes_and_signatures,
)
from syutil.jsonutil import encode_canonical_json
from synapse.types import UserID

from twisted.internet import defer

import itertools
import logging


@@ -75,14 +75,14 @@ class FederationHandler(BaseHandler):

    @log_function
    @defer.inlineCallbacks
    def handle_new_event(self, event, snapshot, destinations):
    def handle_new_event(self, event, destinations):
        """ Takes in an event from the client to server side, that has already
        been authed and handled by the state module, and sends it to any
        remote home servers that may be interested.

        Args:
            event
            snapshot (.storage.Snapshot): The snapshot the event happened after
            event: The event to send
            destinations: A list of destinations to send it to

        Returns:
            Deferred: Resolved when it has successfully been queued for

@@ -112,33 +112,6 @@ class FederationHandler(BaseHandler):

        logger.debug("Processing event: %s", event.event_id)

        redacted_event = prune_event(event)

        redacted_pdu_json = redacted_event.get_pdu_json()
        try:
            yield self.keyring.verify_json_for_server(
                event.origin, redacted_pdu_json
            )
        except SynapseError as e:
            logger.warn(
                "Signature check failed for %s redacted to %s",
                encode_canonical_json(pdu.get_pdu_json()),
                encode_canonical_json(redacted_pdu_json),
            )
            raise FederationError(
                "ERROR",
                e.code,
                e.msg,
                affected=event.event_id,
            )

        if not check_event_content_hash(event):
            logger.warn(
                "Event content has been tampered, redacting %s, %s",
                event.event_id, encode_canonical_json(event.get_dict())
            )
            event = redacted_event

        logger.debug("Event: %s", event)

        # FIXME (erikj): Awful hack to make the case where we are not currently

@@ -148,41 +121,38 @@ class FederationHandler(BaseHandler):
            event.room_id,
            self.server_name
        )
        if not is_in_room and not event.internal_metadata.outlier:
        if not is_in_room and not event.internal_metadata.is_outlier():
            logger.debug("Got event for room we're not in.")

            replication = self.replication_layer

            if not state:
                state, auth_chain = yield replication.get_state_for_context(
                    origin, context=event.room_id, event_id=event.event_id,
                )

                if not auth_chain:
                    auth_chain = yield replication.get_event_auth(
                        origin,
                        context=event.room_id,
                        event_id=event.event_id,
                    )

            for e in auth_chain:
                e.internal_metadata.outlier = True
                try:
                    yield self._handle_new_event(e, fetch_auth_from=origin)
                except:
                    logger.exception(
                        "Failed to handle auth event %s",
                        e.event_id,
                    )

            current_state = state

        event_ids = set()
        if state:
            for e in state:
                logging.info("A :) %r", e)
            event_ids |= {e.event_id for e in state}
        if auth_chain:
            event_ids |= {e.event_id for e in auth_chain}

        seen_ids = set(
            (yield self.store.have_events(event_ids)).keys()
        )

        if state and auth_chain is not None:
            # If we have any state or auth_chain given to us by the replication
            # layer, then we should handle them (if we haven't before.)
            for e in itertools.chain(auth_chain, state):
                if e.event_id in seen_ids:
                    continue

                e.internal_metadata.outlier = True
                try:
                    yield self._handle_new_event(e)
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids
                    }
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )
                    seen_ids.add(e.event_id)
                except:
                    logger.exception(
                        "Failed to handle state event %s",

@@ -191,6 +161,7 @@ class FederationHandler(BaseHandler):

        try:
            yield self._handle_new_event(
                origin,
                event,
                state=state,
                backfilled=backfilled,

@@ -227,7 +198,7 @@ class FederationHandler(BaseHandler):
        extra_users = []
        if event.type == EventTypes.Member:
            target_user_id = event.state_key
            target_user = self.hs.parse_userid(target_user_id)
            target_user = UserID.from_string(target_user_id)
            extra_users.append(target_user)

        yield self.notifier.on_new_room_event(

@@ -236,7 +207,7 @@ class FederationHandler(BaseHandler):

        if event.type == EventTypes.Member:
            if event.membership == Membership.JOIN:
                user = self.hs.parse_userid(event.state_key)
                user = UserID.from_string(event.state_key)
                yield self.distributor.fire(
                    "user_joined_room", user=user, room_id=event.room_id
                )

@@ -281,7 +252,7 @@ class FederationHandler(BaseHandler):
        """
        pdu = yield self.replication_layer.send_invite(
            destination=target_host,
            context=event.room_id,
            room_id=event.room_id,
            event_id=event.event_id,
            pdu=event
        )

@@ -305,7 +276,7 @@ class FederationHandler(BaseHandler):

    @log_function
    @defer.inlineCallbacks
    def do_invite_join(self, target_host, room_id, joinee, content, snapshot):
    def do_invite_join(self, target_hosts, room_id, joinee, content, snapshot):
        """ Attempts to join the `joinee` to the room `room_id` via the
        server `target_host`.

@@ -319,8 +290,10 @@ class FederationHandler(BaseHandler):
        """
        logger.debug("Joining %s to %s", joinee, room_id)

        pdu = yield self.replication_layer.make_join(
            target_host,
        yield self.store.clean_room_for_join(room_id)

        origin, pdu = yield self.replication_layer.make_join(
            target_hosts,
            room_id,
            joinee
        )

@@ -341,7 +314,7 @@ class FederationHandler(BaseHandler):
        self.room_queues[room_id] = []

        builder = self.event_builder_factory.new(
            event.get_pdu_json()
            unfreeze(event.get_pdu_json())
        )

        handled_events = set()

@@ -362,11 +335,20 @@ class FederationHandler(BaseHandler):

            new_event = builder.build()

            # Try the host we successfully got a response to /make_join/
            # request first.
            try:
                target_hosts.remove(origin)
                target_hosts.insert(0, origin)
            except ValueError:
                pass

            ret = yield self.replication_layer.send_join(
                target_host,
                target_hosts,
                new_event
            )

            origin = ret["origin"]
            state = ret["state"]
            auth_chain = ret["auth_chain"]
            auth_chain.sort(key=lambda e: e.depth)

@@ -392,8 +374,19 @@ class FederationHandler(BaseHandler):

            for e in auth_chain:
                e.internal_metadata.outlier = True

                if e.event_id == event.event_id:
                    continue

                try:
                    yield self._handle_new_event(e)
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids
                    }
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )
                except:
                    logger.exception(
                        "Failed to handle auth event %s",

@@ -401,11 +394,18 @@ class FederationHandler(BaseHandler):
                    )

            for e in state:
                # FIXME: Auth these.
                if e.event_id == event.event_id:
                    continue

                e.internal_metadata.outlier = True
                try:
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids
                    }
                    yield self._handle_new_event(
                        e, fetch_auth_from=target_host
                        origin, e, auth_events=auth
                    )
                except:
                    logger.exception(

@@ -413,10 +413,18 @@ class FederationHandler(BaseHandler):
                        e.event_id,
                    )

            auth_ids = [e_id for e_id, _ in event.auth_events]
            auth_events = {
                (e.type, e.state_key): e for e in auth_chain
                if e.event_id in auth_ids
            }

            yield self._handle_new_event(
                origin,
                new_event,
                state=state,
                current_state=state,
                auth_events=auth_events,
            )

            yield self.notifier.on_new_room_event(

@@ -458,11 +466,9 @@ class FederationHandler(BaseHandler):
            builder=builder,
        )

        self.auth.check(event, auth_events=context.auth_events)
        self.auth.check(event, auth_events=context.current_state)

        pdu = event

        defer.returnValue(pdu)
        defer.returnValue(event)

    @defer.inlineCallbacks
    @log_function

@@ -480,7 +486,7 @@ class FederationHandler(BaseHandler):

        event.internal_metadata.outlier = False

        context = yield self._handle_new_event(event)
        context = yield self._handle_new_event(origin, event)

        logger.debug(
            "on_send_join_request: After _handle_new_event: %s, sigs: %s",

@@ -491,7 +497,7 @@ class FederationHandler(BaseHandler):
        extra_users = []
        if event.type == EventTypes.Member:
            target_user_id = event.state_key
            target_user = self.hs.parse_userid(target_user_id)
            target_user = UserID.from_string(target_user_id)
            extra_users.append(target_user)

        yield self.notifier.on_new_room_event(

@@ -500,7 +506,7 @@ class FederationHandler(BaseHandler):

        if event.type == EventTypes.Member:
            if event.content["membership"] == Membership.JOIN:
                user = self.hs.parse_userid(event.state_key)
                user = UserID.from_string(event.state_key)
                yield self.distributor.fire(
                    "user_joined_room", user=user, room_id=event.room_id
                )

@@ -514,13 +520,15 @@ class FederationHandler(BaseHandler):
                if k[0] == EventTypes.Member:
                    if s.content["membership"] == Membership.JOIN:
                        destinations.add(
                            self.hs.parse_userid(s.state_key).domain
                            UserID.from_string(s.state_key).domain
                        )
            except:
                logger.warn(
                    "Failed to get destination from event %s", s.event_id
                )

        destinations.discard(origin)

        logger.debug(
            "on_send_join_request: Sending event: %s, signatures: %s",
            event.event_id,

@@ -565,7 +573,7 @@ class FederationHandler(BaseHandler):
            backfilled=False,
        )

        target_user = self.hs.parse_userid(event.state_key)
        target_user = UserID.from_string(event.state_key)
        yield self.notifier.on_new_room_event(
            event, extra_users=[target_user],
        )

@@ -573,12 +581,13 @@ class FederationHandler(BaseHandler):
        defer.returnValue(event)

    @defer.inlineCallbacks
    def get_state_for_pdu(self, origin, room_id, event_id):
    def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True):
        yield run_on_reactor()

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")
        if do_auth:
            in_room = yield self.auth.check_host_in_room(room_id, origin)
            if not in_room:
                raise AuthError(403, "Host not in room.")

        state_groups = yield self.store.get_state_groups(
            [event_id]
        )

@@ -617,13 +626,13 @@ class FederationHandler(BaseHandler):

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, context, pdu_list, limit):
        in_room = yield self.auth.check_host_in_room(context, origin)
    def on_backfill_request(self, origin, room_id, pdu_list, limit):
        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        events = yield self.store.get_backfill_events(
            context,
            room_id,
            pdu_list,
            limit
        )

@@ -641,6 +650,7 @@ class FederationHandler(BaseHandler):
        event = yield self.store.get_event(
            event_id,
            allow_none=True,
            allow_rejected=True,
        )

        if event:

@@ -681,11 +691,12 @@ class FederationHandler(BaseHandler):
            waiters.pop().callback(None)

    @defer.inlineCallbacks
    def _handle_new_event(self, event, state=None, backfilled=False,
                          current_state=None, fetch_auth_from=None):
    @log_function
    def _handle_new_event(self, origin, event, state=None, backfilled=False,
                          current_state=None, auth_events=None):

        logger.debug(
            "_handle_new_event: Before annotate: %s, sigs: %s",
            "_handle_new_event: %s, sigs: %s",
            event.event_id, event.signatures,
        )

@@ -693,65 +704,46 @@ class FederationHandler(BaseHandler):
            event, old_state=state
        )

        if not auth_events:
            auth_events = context.current_state

        logger.debug(
            "_handle_new_event: Before auth fetch: %s, sigs: %s",
            event.event_id, event.signatures,
            "_handle_new_event: %s, auth_events: %s",
            event.event_id, auth_events,
        )

        is_new_state = not event.internal_metadata.is_outlier()

        known_ids = set(
            [s.event_id for s in context.auth_events.values()]
        )

        for e_id, _ in event.auth_events:
            if e_id not in known_ids:
                e = yield self.store.get_event(e_id, allow_none=True)

                if not e and fetch_auth_from is not None:
                    # Grab the auth_chain over federation if we are missing
                    # auth events.
                    auth_chain = yield self.replication_layer.get_event_auth(
                        fetch_auth_from, event.event_id, event.room_id
                    )
                    for auth_event in auth_chain:
                        yield self._handle_new_event(auth_event)
                    e = yield self.store.get_event(e_id, allow_none=True)

                if not e:
                    # TODO: Do some conflict res to make sure that we're
                    # not the ones who are wrong.
                    logger.info(
                        "Rejecting %s as %s not in db or %s",
                        event.event_id, e_id, known_ids,
                    )
                    # FIXME: How does raising AuthError work with federation?
                    raise AuthError(403, "Cannot find auth event")

                context.auth_events[(e.type, e.state_key)] = e

        logger.debug(
            "_handle_new_event: Before hack: %s, sigs: %s",
            event.event_id, event.signatures,
        )

        # This is a hack to fix some old rooms where the initial join event
        # didn't reference the create event in its auth events.
        if event.type == EventTypes.Member and not event.auth_events:
            if len(event.prev_events) == 1:
                c = yield self.store.get_event(event.prev_events[0][0])
                if c.type == EventTypes.Create:
                    context.auth_events[(c.type, c.state_key)] = c
                    auth_events[(c.type, c.state_key)] = c

        logger.debug(
            "_handle_new_event: Before auth check: %s, sigs: %s",
            event.event_id, event.signatures,
        )
        try:
            yield self.do_auth(
                origin, event, context, auth_events=auth_events
            )
        except AuthError as e:
            logger.warn(
                "Rejecting %s because %s",
                event.event_id, e.msg
            )

        self.auth.check(event, auth_events=context.auth_events)
            context.rejected = RejectedReason.AUTH_ERROR

        logger.debug(
            "_handle_new_event: Before persist_event: %s, sigs: %s",
            event.event_id, event.signatures,
        )
            # FIXME: Don't store as rejected with AUTH_ERROR if we haven't
            # seen all the auth events.
            yield self.store.persist_event(
                event,
                context=context,
                backfilled=backfilled,
                is_new_state=False,
                current_state=current_state,
            )
            raise

        yield self.store.persist_event(
            event,

@@ -761,9 +753,388 @@ class FederationHandler(BaseHandler):
            current_state=current_state,
        )

        logger.debug(
            "_handle_new_event: After persist_event: %s, sigs: %s",
            event.event_id, event.signatures,
        defer.returnValue(context)

    @defer.inlineCallbacks
    def on_query_auth(self, origin, event_id, remote_auth_chain, rejects,
                      missing):
        # Just go through and process each event in `remote_auth_chain`. We
        # don't want to fall into the trap of `missing` being wrong.
        for e in remote_auth_chain:
            try:
                yield self._handle_new_event(origin, e)
            except AuthError:
                pass

        # Now get the current auth_chain for the event.
        local_auth_chain = yield self.store.get_auth_chain([event_id])

        # TODO: Check if we would now reject event_id. If so we need to tell
        # everyone.

        ret = yield self.construct_auth_difference(
            local_auth_chain, remote_auth_chain
        )

        defer.returnValue(context)
        for event in ret["auth_chain"]:
            event.signatures.update(
                compute_event_signature(
                    event,
                    self.hs.hostname,
                    self.hs.config.signing_key[0]
                )
            )

        logger.debug("on_query_auth returning: %s", ret)

        defer.returnValue(ret)

    @defer.inlineCallbacks
    def on_get_missing_events(self, origin, room_id, earliest_events,
                              latest_events, limit, min_depth):
        in_room = yield self.auth.check_host_in_room(
            room_id,
            origin
        )
        if not in_room:
            raise AuthError(403, "Host not in room.")

        limit = min(limit, 20)
        min_depth = max(min_depth, 0)

        missing_events = yield self.store.get_missing_events(
            room_id=room_id,
            earliest_events=earliest_events,
            latest_events=latest_events,
            limit=limit,
            min_depth=min_depth,
        )

        defer.returnValue(missing_events)

    @defer.inlineCallbacks
    @log_function
    def do_auth(self, origin, event, context, auth_events):
        # Check if we have all the auth events.
        have_events = yield self.store.have_events(
            [e_id for e_id, _ in event.auth_events]
        )

        event_auth_events = set(e_id for e_id, _ in event.auth_events)
        seen_events = set(have_events.keys())

        missing_auth = event_auth_events - seen_events

        if missing_auth:
            logger.info("Missing auth: %s", missing_auth)
            # If we don't have all the auth events, we need to get them.
            try:
                remote_auth_chain = yield self.replication_layer.get_event_auth(
                    origin, event.room_id, event.event_id
                )

                seen_remotes = yield self.store.have_events(
                    [e.event_id for e in remote_auth_chain]
                )

                for e in remote_auth_chain:
                    if e.event_id in seen_remotes.keys():
                        continue

                    if e.event_id == event.event_id:
                        continue

                    try:
                        auth_ids = [e_id for e_id, _ in e.auth_events]
                        auth = {
                            (e.type, e.state_key): e for e in remote_auth_chain
                            if e.event_id in auth_ids
                        }
                        e.internal_metadata.outlier = True

                        logger.debug(
                            "do_auth %s missing_auth: %s",
                            event.event_id, e.event_id
                        )
                        yield self._handle_new_event(
                            origin, e, auth_events=auth
                        )

                        if e.event_id in event_auth_events:
                            auth_events[(e.type, e.state_key)] = e
                    except AuthError:
                        pass

                have_events = yield self.store.have_events(
                    [e_id for e_id, _ in event.auth_events]
                )
                seen_events = set(have_events.keys())
            except:
                # FIXME:
                logger.exception("Failed to get auth chain")

        # FIXME: Assumes we have and stored all the state for all the
        # prev_events
        current_state = set(e.event_id for e in auth_events.values())
        different_auth = event_auth_events - current_state

        if different_auth and not event.internal_metadata.is_outlier():
            # Do auth conflict res.
            logger.info("Different auth: %s", different_auth)

            different_events = yield defer.gatherResults(
                [
                    self.store.get_event(
                        d,
                        allow_none=True,
                        allow_rejected=False,
                    )
                    for d in different_auth
                    if d in have_events and not have_events[d]
                ],
                consumeErrors=True
            )

            if different_events:
                local_view = dict(auth_events)
                remote_view = dict(auth_events)
                remote_view.update({
                    (d.type, d.state_key): d for d in different_events
                })

                new_state, prev_state = self.state_handler.resolve_events(
                    [local_view.values(), remote_view.values()],
                    event
                )

                auth_events.update(new_state)

                current_state = set(e.event_id for e in auth_events.values())
                different_auth = event_auth_events - current_state

                context.current_state.update(auth_events)
                context.state_group = None

        if different_auth and not event.internal_metadata.is_outlier():
            logger.info("Different auth after resolution: %s", different_auth)

            # Only do auth resolution if we have something new to say.
            # We can't prove an auth failure.
            do_resolution = False

            provable = [
                RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
            ]

            for e_id in different_auth:
                if e_id in have_events:
                    if have_events[e_id] in provable:
                        do_resolution = True
                        break

            if do_resolution:
                # 1. Get what we think is the auth chain.
                auth_ids = self.auth.compute_auth_events(
                    event, context.current_state
                )
                local_auth_chain = yield self.store.get_auth_chain(auth_ids)

                try:
                    # 2. Get remote difference.
                    result = yield self.replication_layer.query_auth(
                        origin,
                        event.room_id,
                        event.event_id,
                        local_auth_chain,
                    )

                    seen_remotes = yield self.store.have_events(
                        [e.event_id for e in result["auth_chain"]]
                    )

                    # 3. Process any remote auth chain events we haven't seen.
                    for ev in result["auth_chain"]:
                        if ev.event_id in seen_remotes.keys():
                            continue

                        if ev.event_id == event.event_id:
                            continue

                        try:
                            auth_ids = [e_id for e_id, _ in ev.auth_events]
                            auth = {
                                (e.type, e.state_key): e
                                for e in result["auth_chain"]
                                if e.event_id in auth_ids
                            }
                            ev.internal_metadata.outlier = True

                            logger.debug(
                                "do_auth %s different_auth: %s",
                                event.event_id, e.event_id
                            )

                            yield self._handle_new_event(
                                origin, ev, auth_events=auth
                            )

                            if ev.event_id in event_auth_events:
                                auth_events[(ev.type, ev.state_key)] = ev
                        except AuthError:
                            pass

                except:
                    # FIXME:
                    logger.exception("Failed to query auth chain")

            # 4. Look at rejects and their proofs.
            # TODO.

            context.current_state.update(auth_events)
            context.state_group = None

        try:
            self.auth.check(event, auth_events=auth_events)
        except AuthError:
            raise

    @defer.inlineCallbacks
    def construct_auth_difference(self, local_auth, remote_auth):
        """ Given a local and remote auth chain, find the differences. This
        assumes that we have already processed all events in remote_auth

        Params:
            local_auth (list)
            remote_auth (list)

        Returns:
            dict
        """

        logger.debug("construct_auth_difference Start!")

        # TODO: Make sure we are OK with local_auth or remote_auth having more
        # auth events in them than strictly necessary.

        def sort_fun(ev):
            return ev.depth, ev.event_id

        logger.debug("construct_auth_difference after sort_fun!")

        # We find the differences by starting at the "bottom" of each list
        # and iterating up on both lists. The lists are ordered by depth and
        # then event_id, we iterate up both lists until we find the event ids
        # don't match. Then we look at depth/event_id to see which side is
        # missing that event, and iterate only up that list. Repeat.

        remote_list = list(remote_auth)
        remote_list.sort(key=sort_fun)

        local_list = list(local_auth)
        local_list.sort(key=sort_fun)

        local_iter = iter(local_list)
        remote_iter = iter(remote_list)

        logger.debug("construct_auth_difference before get_next!")

        def get_next(it, opt=None):
            try:
                return it.next()
            except:
                return opt

        current_local = get_next(local_iter)
        current_remote = get_next(remote_iter)

        logger.debug("construct_auth_difference before while")

        missing_remotes = []
        missing_locals = []
        while current_local or current_remote:
            if current_remote is None:
                missing_locals.append(current_local)
                current_local = get_next(local_iter)
                continue

            if current_local is None:
                missing_remotes.append(current_remote)
                current_remote = get_next(remote_iter)
                continue

            if current_local.event_id == current_remote.event_id:
                current_local = get_next(local_iter)
                current_remote = get_next(remote_iter)
                continue

            if current_local.depth < current_remote.depth:
                missing_locals.append(current_local)
                current_local = get_next(local_iter)
                continue

            if current_local.depth > current_remote.depth:
                missing_remotes.append(current_remote)
                current_remote = get_next(remote_iter)
                continue

            # They have the same depth, so we fall back to the event_id order
            if current_local.event_id < current_remote.event_id:
                missing_locals.append(current_local)
                current_local = get_next(local_iter)

            if current_local.event_id > current_remote.event_id:
                missing_remotes.append(current_remote)
                current_remote = get_next(remote_iter)
                continue

        logger.debug("construct_auth_difference after while")

        # missing locals should be sent to the server
        # We should find why we are missing remotes, as they will have been
        # rejected.

        # Remove events from missing_remotes if they are referencing a missing
        # remote. We only care about the "root" rejected ones.
        missing_remote_ids = [e.event_id for e in missing_remotes]
        base_remote_rejected = list(missing_remotes)
        for e in missing_remotes:
            for e_id, _ in e.auth_events:
                if e_id in missing_remote_ids:
                    try:
                        base_remote_rejected.remove(e)
                    except ValueError:
                        pass

        reason_map = {}

        for e in base_remote_rejected:
            reason = yield self.store.get_rejection_reason(e.event_id)
            if reason is None:
                # TODO: e is not in the current state, so we should
                # construct some proof of that.
                continue

            reason_map[e.event_id] = reason

            if reason == RejectedReason.AUTH_ERROR:
                pass
            elif reason == RejectedReason.REPLACED:
                # TODO: Get proof
                pass
            elif reason == RejectedReason.NOT_ANCESTOR:
                # TODO: Get proof.
                pass

        logger.debug("construct_auth_difference returning")

        defer.returnValue({
            "auth_chain": local_auth,
            "rejects": {
                e.event_id: {
                    "reason": reason_map[e.event_id],
                    "proof": None,
                }
                for e in base_remote_rejected
            },
            "missing": [e.event_id for e in missing_locals],
        })
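
The comment block in `construct_auth_difference` describes a merge-style walk over two auth chains sorted by `(depth, event_id)`: advance both iterators while the heads match, otherwise advance whichever side is behind and record the gap. A self-contained sketch of that walk using plain tuples in place of events:

```python
def auth_difference(local_list, remote_list):
    """Walk two (depth, event_id)-sorted lists in step, collecting the
    entries unique to each side, as the handler's comments describe."""
    local_list = sorted(local_list)
    remote_list = sorted(remote_list)
    i = j = 0
    missing_locals = []
    missing_remotes = []
    while i < len(local_list) or j < len(remote_list):
        if j >= len(remote_list):
            missing_locals.append(local_list[i])
            i += 1
        elif i >= len(local_list):
            missing_remotes.append(remote_list[j])
            j += 1
        elif local_list[i] == remote_list[j]:
            i += 1
            j += 1
        elif local_list[i] < remote_list[j]:
            # local side is "behind": remote is missing this entry
            missing_locals.append(local_list[i])
            i += 1
        else:
            missing_remotes.append(remote_list[j])
            j += 1
    return missing_locals, missing_remotes

local = [(1, "$create"), (2, "$a"), (3, "$b")]
remote = [(1, "$create"), (2, "$a"), (3, "$c")]
assert auth_difference(local, remote) == ([(3, "$b")], [(3, "$c")])
```
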
@@ -16,12 +16,13 @@
from twisted.internet import defer

from ._base import BaseHandler
from synapse.api.errors import LoginError, Codes
from synapse.api.errors import LoginError, Codes, CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.util.emailutils import EmailException
import synapse.util.emailutils as emailutils

import bcrypt
import json
import logging

logger = logging.getLogger(__name__)

@@ -96,16 +97,20 @@ class LoginHandler(BaseHandler):

    @defer.inlineCallbacks
    def _query_email(self, email):
        httpCli = SimpleHttpClient(self.hs)
        data = yield httpCli.get_json(
            # TODO FIXME This should be configurable.
            # XXX: ID servers need to use HTTPS
            "http://%s%s" % (
                "matrix.org:8090", "/_matrix/identity/api/v1/lookup"
            ),
            {
                'medium': 'email',
                'address': email
            }
        )
        defer.returnValue(data)
        http_client = SimpleHttpClient(self.hs)
        try:
            data = yield http_client.get_json(
                # TODO FIXME This should be configurable.
                # XXX: ID servers need to use HTTPS
                "http://%s%s" % (
                    "matrix.org:8090", "/_matrix/identity/api/v1/lookup"
                ),
                {
                    'medium': 'email',
                    'address': email
                }
            )
            defer.returnValue(data)
        except CodeMessageException as e:
            data = json.loads(e.msg)
            defer.returnValue(data)
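
The rewritten `_query_email` treats a non-2xx reply from the identity server as still carrying a usable JSON body: the exception's message is parsed instead of being propagated. A rough sketch of the pattern with a generic exception type (the class here is an illustrative stand-in, not Synapse's exact `CodeMessageException`):

```python
import json

class CodeMessageError(Exception):
    """Illustrative stand-in: carries an HTTP status and the raw body."""
    def __init__(self, code, msg):
        super(CodeMessageError, self).__init__(msg)
        self.code = code
        self.msg = msg

def lookup(do_request):
    try:
        return do_request()
    except CodeMessageError as e:
        # Error responses from the ID server still contain a JSON body,
        # so surface that instead of failing the whole lookup.
        return json.loads(e.msg)

def failing_request():
    raise CodeMessageError(404, '{"error": "not found"}')

assert lookup(failing_request) == {"error": "not found"}
```
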
@@ -16,10 +16,12 @@
from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import RoomError
from synapse.api.errors import RoomError, SynapseError
from synapse.streams.config import PaginationConfig
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import UserID

from ._base import BaseHandler

@@ -33,6 +35,7 @@ class MessageHandler(BaseHandler):
    def __init__(self, hs):
        super(MessageHandler, self).__init__(hs)
        self.hs = hs
        self.state = hs.get_state_handler()
        self.clock = hs.get_clock()
        self.validator = EventValidator()

@@ -67,7 +70,7 @@ class MessageHandler(BaseHandler):

    @defer.inlineCallbacks
    def get_messages(self, user_id=None, room_id=None, pagin_config=None,
                     feedback=False):
                     feedback=False, as_client_event=True):
        """Get messages in a room.

        Args:

@@ -76,6 +79,7 @@ class MessageHandler(BaseHandler):
            pagin_config (synapse.api.streams.PaginationConfig): The pagination
                config rules to apply, if any.
            feedback (bool): True to get compressed feedback with the messages
            as_client_event (bool): True to get events in client-server format.
        Returns:
            dict: Pagination API results
        """

@@ -88,7 +92,7 @@ class MessageHandler(BaseHandler):
            yield self.hs.get_event_sources().get_current_token()
        )

        user = self.hs.parse_userid(user_id)
        user = UserID.from_string(user_id)

        events, next_key = yield data_source.get_pagination_rows(
            user, pagin_config.get_source_config("room"), room_id

@@ -98,8 +102,12 @@ class MessageHandler(BaseHandler):
            "room_key", next_key
        )

        time_now = self.clock.time_msec()

        chunk = {
            "chunk": [self.hs.serialize_event(e) for e in events],
            "chunk": [
                serialize_event(e, time_now, as_client_event) for e in events
            ],
            "start": pagin_config.from_token.to_string(),
            "end": next_token.to_string(),
        }

@@ -107,7 +115,8 @@ class MessageHandler(BaseHandler):
        defer.returnValue(chunk)

    @defer.inlineCallbacks
    def create_and_send_event(self, event_dict, ratelimit=True):
    def create_and_send_event(self, event_dict, ratelimit=True,
                              client=None, txn_id=None):
        """ Given a dict from a client, create and handle a new event.

        Creates an FrozenEvent object, filling out auth_events, prev_events,

@@ -127,13 +136,13 @@ class MessageHandler(BaseHandler):
        if ratelimit:
            self.ratelimit(builder.user_id)
        # TODO(paul): Why does 'event' not have a 'user' object?
        user = self.hs.parse_userid(builder.user_id)
        user = UserID.from_string(builder.user_id)
        assert self.hs.is_mine(user), "User must be our own: %s" % (user,)

        if builder.type == EventTypes.Member:
            membership = builder.content.get("membership", None)
            if membership == Membership.JOIN:
                joinee = self.hs.parse_userid(builder.state_key)
                joinee = UserID.from_string(builder.state_key)
                # If event doesn't include a display name, add one.
                yield self.distributor.fire(
                    "collect_presencelike_data",

@@ -141,6 +150,15 @@ class MessageHandler(BaseHandler):
                    builder.content
                )

        if client is not None:
            if client.token_id is not None:
                builder.internal_metadata.token_id = client.token_id
            if client.device_id is not None:
                builder.internal_metadata.device_id = client.device_id

        if txn_id is not None:
            builder.internal_metadata.txn_id = txn_id

        event, context = yield self._create_new_client_event(
            builder=builder,
        )

@@ -207,11 +225,14 @@ class MessageHandler(BaseHandler):

        # TODO: This is duplicating logic from snapshot_all_rooms
        current_state = yield self.state_handler.get_current_state(room_id)
        defer.returnValue([self.hs.serialize_event(c) for c in current_state])
        now = self.clock.time_msec()
        defer.returnValue(
            [serialize_event(c, now) for c in current_state.values()]
        )

    @defer.inlineCallbacks
    def snapshot_all_rooms(self, user_id=None, pagin_config=None,
                           feedback=False):
                           feedback=False, as_client_event=True):
        """Retrieve a snapshot of all rooms the user is invited or has joined.

        This snapshot may include messages for all rooms where the user is

@@ -222,6 +243,7 @@ class MessageHandler(BaseHandler):
            pagin_config (synapse.api.streams.PaginationConfig): The pagination
                config used to determine how many messages *PER ROOM* to return.
            feedback (bool): True to get feedback along with these messages.
            as_client_event (bool): True to get events in client-server format.
        Returns:
            A list of dicts with "room_id" and "membership" keys for all rooms
            the user is currently invited or joined in on. Rooms where the user

@@ -233,7 +255,7 @@ class MessageHandler(BaseHandler):
            membership_list=[Membership.INVITE, Membership.JOIN]
        )

        user = self.hs.parse_userid(user_id)
        user = UserID.from_string(user_id)

        rooms_ret = []

@@ -278,9 +300,13 @@ class MessageHandler(BaseHandler):

                start_token = now_token.copy_and_replace("room_key", token[0])
                end_token = now_token.copy_and_replace("room_key", token[1])
                time_now = self.clock.time_msec()

                d["messages"] = {
                    "chunk": [self.hs.serialize_event(m) for m in messages],
                    "chunk": [
                        serialize_event(m, time_now, as_client_event)
                        for m in messages
                    ],
                    "start": start_token.to_string(),
                    "end": end_token.to_string(),
                }

@@ -289,7 +315,8 @@ class MessageHandler(BaseHandler):
                    event.room_id
                )
                d["state"] = [
                    self.hs.serialize_event(c) for c in current_state
                    serialize_event(c, time_now, as_client_event)
                    for c in current_state.values()
                ]
            except:
                logger.exception("Failed to get snapshot")

@@ -305,20 +332,27 @@ class MessageHandler(BaseHandler):
    @defer.inlineCallbacks
    def room_initial_sync(self, user_id, room_id, pagin_config=None,
                          feedback=False):
        yield self.auth.check_joined_room(room_id, user_id)
        current_state = yield self.state.get_current_state(
            room_id=room_id,
        )

        yield self.auth.check_joined_room(
            room_id, user_id,
            current_state=current_state
        )

        # TODO(paul): I wish I was called with user objects not user_id
        # strings...
        auth_user = self.hs.parse_userid(user_id)
        auth_user = UserID.from_string(user_id)

        # TODO: These concurrently
        state_tuples = yield self.state_handler.get_current_state(room_id)
        state = [self.hs.serialize_event(x) for x in state_tuples]
        time_now = self.clock.time_msec()
        state = [
            serialize_event(x, time_now)
            for x in current_state.values()
        ]

        member_event = (yield self.store.get_room_member(
            user_id=user_id,
            room_id=room_id
        ))
        member_event = current_state.get((EventTypes.Member, user_id,))

        now_token = yield self.hs.get_event_sources().get_current_token()

@@ -335,28 +369,34 @@ class MessageHandler(BaseHandler):
        start_token = now_token.copy_and_replace("room_key", token[0])
        end_token = now_token.copy_and_replace("room_key", token[1])

        room_members = yield self.store.get_room_members(room_id)
        room_members = [
            m for m in current_state.values()
            if m.type == EventTypes.Member
            and m.content["membership"] == Membership.JOIN
        ]

        presence_handler = self.hs.get_handlers().presence_handler
        presence = []
        for m in room_members:
            try:
                member_presence = yield presence_handler.get_state(
                    target_user=self.hs.parse_userid(m.user_id),
                    target_user=UserID.from_string(m.user_id),
                    auth_user=auth_user,
                    as_event=True,
                )
                presence.append(member_presence)
            except Exception:
            except SynapseError:
                logger.exception(
                    "Failed to get member presence of %r", m.user_id
                )

        time_now = self.clock.time_msec()

        defer.returnValue({
            "membership": member_event.membership,
            "room_id": room_id,
            "messages": {
                "chunk": [self.hs.serialize_event(m) for m in messages],
                "chunk": [serialize_event(m, time_now) for m in messages],
                "start": start_token.to_string(),
                "end": end_token.to_string(),
            },
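
The paginated responses above bracket their chunk with `start`/`end` tokens built via `copy_and_replace`, which leaves the original token untouched so it can be reused for other streams. A minimal sketch of an immutable token with that method (the real `StreamToken` carries more keys than shown here):

```python
from collections import namedtuple

class StreamToken(namedtuple("StreamToken", ["room_key", "presence_key"])):
    """Illustrative immutable token; only two keys for brevity."""
    def copy_and_replace(self, key, new_value):
        # Returns a new token; the original is never mutated.
        return self._replace(**{key: new_value})

now = StreamToken(room_key="r100", presence_key="p7")
start = now.copy_and_replace("room_key", "r80")
assert now.room_key == "r100" and start.room_key == "r80"
```
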
@@ -20,6 +20,8 @@ from synapse.api.constants import PresenceState
|
||||
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.util.logcontext import PreserveLoggingContext
|
||||
from synapse.types import UserID
|
||||
import synapse.metrics
|
||||
|
||||
from ._base import BaseHandler
|
||||
|
||||
@@ -28,6 +30,8 @@ import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
metrics = synapse.metrics.get_metrics_for(__name__)
|
||||
|
||||
|
||||
# TODO(paul): Maybe there's one of these I can steal from somewhere
|
||||
def partition(l, func):
|
||||
@@ -86,6 +90,10 @@ class PresenceHandler(BaseHandler):
|
||||
"changed_presencelike_data", self.changed_presencelike_data
|
||||
)
|
||||
|
||||
# outbound signal from the presence module to advertise when a user's
|
||||
# presence has changed
|
||||
distributor.declare("user_presence_changed")
|
||||
|
||||
self.distributor = distributor
|
||||
|
||||
self.federation = hs.get_replication_layer()
|
||||
@@ -96,22 +104,22 @@ class PresenceHandler(BaseHandler):
|
||||
self.federation.register_edu_handler(
|
||||
"m.presence_invite",
|
||||
lambda origin, content: self.invite_presence(
|
||||
observed_user=hs.parse_userid(content["observed_user"]),
|
||||
observer_user=hs.parse_userid(content["observer_user"]),
|
||||
observed_user=UserID.from_string(content["observed_user"]),
|
||||
observer_user=UserID.from_string(content["observer_user"]),
|
||||
)
|
||||
)
|
||||
self.federation.register_edu_handler(
|
||||
"m.presence_accept",
|
||||
lambda origin, content: self.accept_presence(
|
||||
observed_user=hs.parse_userid(content["observed_user"]),
|
||||
observer_user=hs.parse_userid(content["observer_user"]),
|
||||
observed_user=UserID.from_string(content["observed_user"]),
|
||||
observer_user=UserID.from_string(content["observer_user"]),
|
||||
)
|
||||
)
|
||||
self.federation.register_edu_handler(
|
||||
"m.presence_deny",
|
||||
lambda origin, content: self.deny_presence(
|
||||
observed_user=hs.parse_userid(content["observed_user"]),
|
||||
observer_user=hs.parse_userid(content["observer_user"]),
|
||||
observed_user=UserID.from_string(content["observed_user"]),
|
||||
observer_user=UserID.from_string(content["observer_user"]),
|
||||
)
|
||||
)
|
||||
|
||||
@@ -128,6 +136,11 @@ class PresenceHandler(BaseHandler):
|
||||
self._user_cachemap = {}
|
||||
self._user_cachemap_latest_serial = 0
|
||||
|
||||
metrics.register_callback(
|
||||
"userCachemap:size",
|
||||
lambda: len(self._user_cachemap),
|
||||
)
|
||||
|
||||
def _get_or_make_usercache(self, user):
|
||||
"""If the cache entry doesn't exist, initialise a new one."""
|
||||
if user not in self._user_cachemap:
|
||||
@@ -418,7 +431,7 @@ class PresenceHandler(BaseHandler):
|
||||
)
|
||||
|
||||
for p in presence:
|
||||
observed_user = self.hs.parse_userid(p.pop("observed_user_id"))
|
||||
observed_user = UserID.from_string(p.pop("observed_user_id"))
|
||||
p["observed_user"] = observed_user
|
||||
p.update(self._get_or_offline_usercache(observed_user).get_state())
|
||||
if "last_active" in p:
|
||||
@@ -441,20 +454,20 @@ class PresenceHandler(BaseHandler):
|
||||
user.localpart, accepted=True
|
||||
)
|
||||
target_users = set([
|
||||
self.hs.parse_userid(x["observed_user_id"]) for x in presence
|
||||
UserID.from_string(x["observed_user_id"]) for x in presence
|
||||
])
|
||||
|
||||
# Also include people in all my rooms
|
||||
|
||||
rm_handler = self.homeserver.get_handlers().room_member_handler
|
||||
room_ids = yield rm_handler.get_rooms_for_user(user)
|
||||
room_ids = yield rm_handler.get_joined_rooms_for_user(user)
|
||||
|
||||
if state is None:
|
||||
state = yield self.store.get_presence_state(user.localpart)
|
||||
else:
|
||||
# statuscache = self._get_or_make_usercache(user)
|
||||
# self._user_cachemap_latest_serial += 1
|
||||
# statuscache.update(state, self._user_cachemap_latest_serial)
|
||||
# statuscache = self._get_or_make_usercache(user)
|
||||
# self._user_cachemap_latest_serial += 1
|
||||
# statuscache.update(state, self._user_cachemap_latest_serial)
|
||||
pass
|
||||
|
||||
yield self.push_update_to_local_and_remote(
|
||||
@@ -487,7 +500,7 @@ class PresenceHandler(BaseHandler):
|
||||
user, domain, remoteusers
|
||||
))
|
||||
|
||||
yield defer.DeferredList(deferreds)
|
||||
yield defer.DeferredList(deferreds, consumeErrors=True)
|
||||
|
||||
def _start_polling_local(self, user, target_user):
|
||||
target_localpart = target_user.localpart
|
||||
@@ -543,7 +556,7 @@ class PresenceHandler(BaseHandler):
|
||||
self._stop_polling_remote(user, domain, remoteusers)
|
||||
)
|
||||
|
||||
return defer.DeferredList(deferreds)
|
||||
return defer.DeferredList(deferreds, consumeErrors=True)
|
||||
|
||||
def _stop_polling_local(self, user, target_user):
|
||||
for localpart in self._local_pushmap.keys():
|
||||
@@ -591,7 +604,7 @@ class PresenceHandler(BaseHandler):
|
||||
localusers.add(user)
|
||||
|
||||
rm_handler = self.homeserver.get_handlers().room_member_handler
|
||||
room_ids = yield rm_handler.get_rooms_for_user(user)
|
||||
room_ids = yield rm_handler.get_joined_rooms_for_user(user)
|
||||
|
||||
if not localusers and not room_ids:
|
||||
defer.returnValue(None)
|
||||
@@ -603,6 +616,7 @@ class PresenceHandler(BaseHandler):
|
||||
room_ids=room_ids,
|
||||
statuscache=statuscache,
|
||||
)
|
||||
yield self.distributor.fire("user_presence_changed", user, statuscache)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _push_presence_remote(self, user, destination, state=None):
|
||||
@@ -646,16 +660,18 @@ class PresenceHandler(BaseHandler):
|
||||
deferreds = []
|
||||
|
||||
for push in content.get("push", []):
|
||||
user = self.hs.parse_userid(push["user_id"])
|
||||
user = UserID.from_string(push["user_id"])
|
||||
|
||||
logger.debug("Incoming presence update from %s", user)
|
||||
|
||||
observers = set(self._remote_recvmap.get(user, set()))
|
||||
if observers:
|
||||
logger.debug(" | %d interested local observers %r", len(observers), observers)
|
||||
logger.debug(
|
||||
" | %d interested local observers %r", len(observers), observers
|
||||
)
|
||||
|
||||
rm_handler = self.homeserver.get_handlers().room_member_handler
|
||||
room_ids = yield rm_handler.get_rooms_for_user(user)
|
||||
room_ids = yield rm_handler.get_joined_rooms_for_user(user)
|
||||
if room_ids:
|
||||
logger.debug(" | %d interested room IDs %r", len(room_ids), room_ids)
|
||||
|
||||
@@ -694,14 +710,14 @@ class PresenceHandler(BaseHandler):
|
||||
del self._user_cachemap[user]
|
||||
|
||||
for poll in content.get("poll", []):
|
||||
user = self.hs.parse_userid(poll)
|
||||
user = UserID.from_string(poll)
|
||||
|
||||
if not self.hs.is_mine(user):
|
||||
continue
|
||||
|
||||
# TODO(paul) permissions checks
|
||||
|
||||
if not user in self._remote_sendmap:
|
||||
if user not in self._remote_sendmap:
|
||||
self._remote_sendmap[user] = set()
|
||||
|
||||
self._remote_sendmap[user].add(origin)
|
||||
@@ -709,7 +725,7 @@ class PresenceHandler(BaseHandler):
|
||||
deferreds.append(self._push_presence_remote(user, origin))
|
||||
|
||||
for unpoll in content.get("unpoll", []):
|
||||
user = self.hs.parse_userid(unpoll)
|
||||
user = UserID.from_string(unpoll)
|
||||
|
||||
if not self.hs.is_mine(user):
|
||||
continue
|
||||
@@ -721,7 +737,7 @@ class PresenceHandler(BaseHandler):
                 del self._remote_sendmap[user]
 
         with PreserveLoggingContext():
-            yield defer.DeferredList(deferreds)
+            yield defer.DeferredList(deferreds, consumeErrors=True)
 
     @defer.inlineCallbacks
     def push_update_to_local_and_remote(self, observed_user, statuscache,
@@ -760,7 +776,7 @@ class PresenceHandler(BaseHandler):
                 )
             )
 
-        yield defer.DeferredList(deferreds)
+        yield defer.DeferredList(deferreds, consumeErrors=True)
 
         defer.returnValue((localusers, remote_domains))
 
@@ -18,6 +18,7 @@ from twisted.internet import defer
 from synapse.api.errors import SynapseError, AuthError, CodeMessageException
 from synapse.api.constants import EventTypes, Membership
 from synapse.util.logcontext import PreserveLoggingContext
+from synapse.types import UserID
 
 from ._base import BaseHandler
 
@@ -169,7 +170,7 @@ class ProfileHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def on_profile_query(self, args):
-        user = self.hs.parse_userid(args["user_id"])
+        user = UserID.from_string(args["user_id"])
         if not self.hs.is_mine(user):
             raise SynapseError(400, "User is not hosted on this Home Server")
 
@@ -196,9 +197,8 @@ class ProfileHandler(BaseHandler):
 
         self.ratelimit(user.to_string())
 
-        joins = yield self.store.get_rooms_for_user_where_membership_is(
+        joins = yield self.store.get_rooms_for_user(
             user.to_string(),
-            [Membership.JOIN],
         )
 
         for j in joins:
@@ -211,10 +211,16 @@ class ProfileHandler(BaseHandler):
             )
 
             msg_handler = self.hs.get_handlers().message_handler
-            yield msg_handler.create_and_send_event({
-                "type": EventTypes.Member,
-                "room_id": j.room_id,
-                "state_key": user.to_string(),
-                "content": content,
-                "sender": user.to_string()
-            }, ratelimit=False)
+            try:
+                yield msg_handler.create_and_send_event({
+                    "type": EventTypes.Member,
+                    "room_id": j.room_id,
+                    "state_key": user.to_string(),
+                    "content": content,
+                    "sender": user.to_string()
+                }, ratelimit=False)
+            except Exception as e:
+                logger.warn(
+                    "Failed to update join event for room %s - %s",
+                    j.room_id, str(e.message)
+                )
 
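The profile hunk above wraps each per-room membership update in try/except, so one room that rejects the event no longer aborts propagation of a display-name or avatar change to the user's other rooms. The shape of the change, sketched standalone with a hypothetical update_room helper (and modern logging/exception spelling rather than the diff's Python 2 e.message):

# Best-effort fan-out: log and continue on per-room failure.
import logging

logger = logging.getLogger(__name__)

def update_room(room_id):
    # Hypothetical stand-in for create_and_send_event.
    if room_id == "!bad:example.com":
        raise RuntimeError("remote rejected event")

def propagate_profile_change(room_ids):
    for room_id in room_ids:
        try:
            update_room(room_id)
        except Exception as e:
            # A single failure should not stop the loop.
            logger.warning(
                "Failed to update join event for room %s - %s", room_id, e
            )

propagate_profile_change(["!ok:example.com", "!bad:example.com"])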
@@ -18,7 +18,8 @@ from twisted.internet import defer
 
 from synapse.types import UserID
 from synapse.api.errors import (
-    SynapseError, RegistrationError, InvalidCaptchaError
+    AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError,
+    CodeMessageException
 )
 from ._base import BaseHandler
 import synapse.util.stringutils as stringutils
@@ -28,7 +29,9 @@ from synapse.http.client import CaptchaServerHttpClient
 
 import base64
 import bcrypt
+import json
 import logging
+import urllib
 
 logger = logging.getLogger(__name__)
 
@@ -61,9 +64,18 @@ class RegistrationHandler(BaseHandler):
             password_hash = bcrypt.hashpw(password, bcrypt.gensalt())
 
         if localpart:
+            if localpart and urllib.quote(localpart) != localpart:
+                raise SynapseError(
+                    400,
+                    "User ID must only contain characters which do not"
+                    " require URL encoding."
+                )
+
             user = UserID(localpart, self.hs.hostname)
             user_id = user.to_string()
 
+            yield self.check_user_id_is_valid(user_id)
+
             token = self._generate_token(user_id)
             yield self.store.register(
                 user_id=user_id,
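The localpart check added above rejects any localpart that would change under URL quoting, which keeps the resulting user ID safe to embed in request paths. It is a simple round-trip comparison; the diff uses Python 2's urllib.quote, and the same idea in Python 3 looks like:

# A localpart is acceptable only if URL-quoting it is a no-op.
from urllib.parse import quote

def localpart_needs_encoding(localpart):
    return quote(localpart) != localpart

assert not localpart_needs_encoding("alice")
assert localpart_needs_encoding("alice smith")  # space needs %-encoding
assert localpart_needs_encoding("älice")        # non-ASCII needs %-encoding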
@@ -82,6 +94,7 @@ class RegistrationHandler(BaseHandler):
                     localpart = self._generate_user_id()
                     user = UserID(localpart, self.hs.hostname)
                     user_id = user.to_string()
+                    yield self.check_user_id_is_valid(user_id)
 
                     token = self._generate_token(user_id)
                     yield self.store.register(
@@ -99,6 +112,47 @@ class RegistrationHandler(BaseHandler):
                         raise RegistrationError(
                             500, "Cannot generate user ID.")
 
+        # create a default avatar for the user
+        # XXX: ideally clients would explicitly specify one, but given they don't
+        # and we want consistent and pretty identicons for random users, we'll
+        # do it here.
+        try:
+            auth_user = UserID.from_string(user_id)
+            media_repository = self.hs.get_resource_for_media_repository()
+            identicon_resource = media_repository.getChildWithDefault("identicon", None)
+            upload_resource = media_repository.getChildWithDefault("upload", None)
+            identicon_bytes = identicon_resource.generate_identicon(user_id, 320, 320)
+            content_uri = yield upload_resource.create_content(
+                "image/png", None, identicon_bytes, len(identicon_bytes), auth_user
+            )
+            profile_handler = self.hs.get_handlers().profile_handler
+            profile_handler.set_avatar_url(
+                auth_user, auth_user, ("%s#auto" % (content_uri,))
+            )
+        except NotImplementedError:
+            pass  # make tests pass without messing around creating default avatars
+
         defer.returnValue((user_id, token))
 
+    @defer.inlineCallbacks
+    def appservice_register(self, user_localpart, as_token):
+        user = UserID(user_localpart, self.hs.hostname)
+        user_id = user.to_string()
+        service = yield self.store.get_app_service_by_token(as_token)
+        if not service:
+            raise AuthError(403, "Invalid application service token.")
+        if not service.is_interested_in_user(user_id):
+            raise SynapseError(
+                400, "Invalid user localpart for this application service.",
+                errcode=Codes.EXCLUSIVE
+            )
+        token = self._generate_token(user_id)
+        yield self.store.register(
+            user_id=user_id,
+            token=token,
+            password_hash=""
+        )
+        self.distributor.fire("registered_user", user)
+        defer.returnValue((user_id, token))
+
     @defer.inlineCallbacks
@@ -147,6 +201,21 @@ class RegistrationHandler(BaseHandler):
             # XXX: This should be a deferred list, shouldn't it?
             yield self._bind_threepid(c, user_id)
 
+    @defer.inlineCallbacks
+    def check_user_id_is_valid(self, user_id):
+        # valid user IDs must not clash with any user ID namespaces claimed by
+        # application services.
+        services = yield self.store.get_app_services()
+        interested_services = [
+            s for s in services if s.is_interested_in_user(user_id)
+        ]
+        for service in interested_services:
+            if service.is_exclusive_user(user_id):
+                raise SynapseError(
+                    400, "This user ID is reserved by an application service.",
+                    errcode=Codes.EXCLUSIVE
+                )
+
     def _generate_token(self, user_id):
         # urlsafe variant uses _ and - so use . as the separator and replace
         # all =s with .s so http clients don't quote =s when it is used as
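check_user_id_is_valid, added above, enforces the application-service namespaces that appservice_register relies on: a service may merely be interested in a user-ID pattern, or may claim it exclusively, and only the exclusive claim blocks ordinary registration. A toy model of the two-level check, regex-based like Synapse's namespace config, though the real AppService class carries more structure:

# Toy model: "interested" patterns may be shared; "exclusive" reserves IDs.
import re

class AppService:
    def __init__(self, interested, exclusive):
        self.interested = [re.compile(p) for p in interested]
        self.exclusive = [re.compile(p) for p in exclusive]

    def is_interested_in_user(self, user_id):
        return any(p.match(user_id) for p in self.interested)

    def is_exclusive_user(self, user_id):
        return any(p.match(user_id) for p in self.exclusive)

def check_user_id_is_valid(user_id, services):
    for service in services:
        if service.is_interested_in_user(user_id):
            if service.is_exclusive_user(user_id):
                raise ValueError(
                    "This user ID is reserved by an application service."
                )

irc_bridge = AppService(interested=[r"@irc_.*"], exclusive=[r"@irc_.*"])
check_user_id_is_valid("@alice:example.com", [irc_bridge])  # passes
try:
    check_user_id_is_valid("@irc_alice:example.com", [irc_bridge])
except ValueError as e:
    print(e)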
@@ -161,21 +230,26 @@ class RegistrationHandler(BaseHandler):
     def _threepid_from_creds(self, creds):
         # TODO: get this from the homeserver rather than creating a new one for
         # each request
-        httpCli = SimpleHttpClient(self.hs)
+        http_client = SimpleHttpClient(self.hs)
         # XXX: make this configurable!
-        trustedIdServers = ['matrix.org:8090']
+        trustedIdServers = ['matrix.org:8090', 'matrix.org']
         if not creds['idServer'] in trustedIdServers:
             logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
                         'credentials', creds['idServer'])
             defer.returnValue(None)
-        data = yield httpCli.get_json(
-            # XXX: This should be HTTPS
-            "http://%s%s" % (
-                creds['idServer'],
-                "/_matrix/identity/api/v1/3pid/getValidated3pid"
-            ),
-            {'sid': creds['sid'], 'clientSecret': creds['clientSecret']}
-        )
+
+        data = {}
+        try:
+            data = yield http_client.get_json(
+                # XXX: This should be HTTPS
+                "http://%s%s" % (
+                    creds['idServer'],
+                    "/_matrix/identity/api/v1/3pid/getValidated3pid"
+                ),
+                {'sid': creds['sid'], 'clientSecret': creds['clientSecret']}
+            )
+        except CodeMessageException as e:
+            data = json.loads(e.msg)
 
         if 'medium' in data:
             defer.returnValue(data)
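In `_threepid_from_creds` above (and `_bind_threepid` in the next hunk), the identity-server call gains a try/except because the HTTP client raises `CodeMessageException` on non-2xx responses while the identity server still returns a JSON body describing the result; the handler therefore recovers the body with `json.loads(e.msg)` instead of failing. A standalone sketch of that recover-the-error-body pattern, with a stand-in exception type:

# Treat an HTTP error response as data when the server is known to put
# a JSON body in it. This CodeMessageException is a local stand-in.
import json

class CodeMessageException(Exception):
    def __init__(self, code, msg):
        super().__init__(code, msg)
        self.code = code
        self.msg = msg

def get_json(url):
    # Pretend the server answered 400 with a JSON error body.
    raise CodeMessageException(400, json.dumps({"errcode": "M_NOT_FOUND"}))

def fetch_validated_threepid(url):
    data = {}
    try:
        data = get_json(url)
    except CodeMessageException as e:
        data = json.loads(e.msg)  # the error body is still valid JSON
    return data

print(fetch_validated_threepid("http://id.example.com/getValidated3pid"))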
@@ -185,19 +259,23 @@ class RegistrationHandler(BaseHandler):
     def _bind_threepid(self, creds, mxid):
         yield
         logger.debug("binding threepid")
-        httpCli = SimpleHttpClient(self.hs)
-        data = yield httpCli.post_urlencoded_get_json(
-            # XXX: Change when ID servers are all HTTPS
-            "http://%s%s" % (
-                creds['idServer'], "/_matrix/identity/api/v1/3pid/bind"
-            ),
-            {
-                'sid': creds['sid'],
-                'clientSecret': creds['clientSecret'],
-                'mxid': mxid,
-            }
-        )
-        logger.debug("bound threepid")
+        http_client = SimpleHttpClient(self.hs)
+        data = None
+        try:
+            data = yield http_client.post_urlencoded_get_json(
+                # XXX: Change when ID servers are all HTTPS
+                "http://%s%s" % (
+                    creds['idServer'], "/_matrix/identity/api/v1/3pid/bind"
+                ),
+                {
+                    'sid': creds['sid'],
+                    'clientSecret': creds['clientSecret'],
+                    'mxid': mxid,
+                }
+            )
+            logger.debug("bound threepid")
+        except CodeMessageException as e:
+            data = json.loads(e.msg)
         defer.returnValue(data)
 
     @defer.inlineCallbacks
 
@@ -16,12 +16,14 @@
 """Contains functions for performing events on rooms."""
 from twisted.internet import defer
 
+from ._base import BaseHandler
+
 from synapse.types import UserID, RoomAlias, RoomID
 from synapse.api.constants import EventTypes, Membership, JoinRules
 from synapse.api.errors import StoreError, SynapseError
 from synapse.util import stringutils
 from synapse.util.async import run_on_reactor
-from ._base import BaseHandler
+from synapse.events.utils import serialize_event
 
 import logging
 
@@ -64,7 +66,7 @@ class RoomCreationHandler(BaseHandler):
         invite_list = config.get("invite", [])
         for i in invite_list:
             try:
-                self.hs.parse_userid(i)
+                UserID.from_string(i)
             except:
                 raise SynapseError(400, "Invalid user_id: %s" % (i,))
 
@@ -114,7 +116,7 @@ class RoomCreationHandler(BaseHandler):
             servers=[self.hs.hostname],
         )
 
-        user = self.hs.parse_userid(user_id)
+        user = UserID.from_string(user_id)
         creation_events = self._create_events_for_new_room(
             user, room_id, is_public=is_public
         )
@@ -246,11 +248,9 @@ class RoomMemberHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def get_room_members(self, room_id):
-        hs = self.hs
-
         users = yield self.store.get_users_in_room(room_id)
 
-        defer.returnValue([hs.parse_userid(u) for u in users])
+        defer.returnValue([UserID.from_string(u) for u in users])
 
     @defer.inlineCallbacks
     def fetch_room_distributions_into(self, room_id, localusers=None,
@@ -295,8 +295,9 @@ class RoomMemberHandler(BaseHandler):
         yield self.auth.check_joined_room(room_id, user_id)
 
         member_list = yield self.store.get_room_members(room_id=room_id)
+        time_now = self.clock.time_msec()
         event_list = [
-            self.hs.serialize_event(entry)
+            serialize_event(entry, time_now)
             for entry in member_list
         ]
         chunk_data = {
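The `serialize_event(entry, time_now)` change above swaps a homeserver method for the free function in `synapse.events.utils` and threads the current time in explicitly, which lets the serializer derive a relative age for the event instead of reading a clock itself. A toy illustration of why the timestamp is a parameter (field names follow the Matrix client-server format, but this is not the real serializer):

# Toy serializer: time_now_ms is passed in so "age" is derived, not read.
def serialize_event(event, time_now_ms):
    out = dict(event)
    if "origin_server_ts" in out:
        out["unsigned"] = {"age": time_now_ms - out["origin_server_ts"]}
    return out

event = {"type": "m.room.member", "origin_server_ts": 1000000}
print(serialize_event(event, time_now_ms=1005000))
# -> {'type': 'm.room.member', 'origin_server_ts': 1000000,
#     'unsigned': {'age': 5000}}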
@@ -368,7 +369,7 @@ class RoomMemberHandler(BaseHandler):
             )
 
         if prev_state and prev_state.membership == Membership.JOIN:
-            user = self.hs.parse_userid(event.user_id)
+            user = UserID.from_string(event.user_id)
             self.distributor.fire(
                 "user_left_room", user=user, room_id=event.room_id
             )
@@ -388,8 +389,6 @@ class RoomMemberHandler(BaseHandler):
         if not hosts:
             raise SynapseError(404, "No known servers")
 
-        host = hosts[0]
-
         # If event doesn't include a display name, add one.
         yield self.distributor.fire(
             "collect_presencelike_data", joinee, content
@@ -406,13 +405,13 @@ class RoomMemberHandler(BaseHandler):
         })
         event, context = yield self._create_new_client_event(builder)
 
-        yield self._do_join(event, context, room_host=host, do_auth=True)
+        yield self._do_join(event, context, room_hosts=hosts, do_auth=True)
 
         defer.returnValue({"room_id": room_id})
 
     @defer.inlineCallbacks
-    def _do_join(self, event, context, room_host=None, do_auth=True):
-        joinee = self.hs.parse_userid(event.state_key)
+    def _do_join(self, event, context, room_hosts=None, do_auth=True):
+        joinee = UserID.from_string(event.state_key)
         # room_id = RoomID.from_string(event.room_id, self.hs)
         room_id = event.room_id
 
@@ -425,10 +424,22 @@ class RoomMemberHandler(BaseHandler):
             event.room_id,
             self.hs.hostname
         )
+        if not is_host_in_room:
+            # is *anyone* in the room?
+            room_member_keys = [
+                v for (k, v) in context.current_state.keys() if (
+                    k == "m.room.member"
+                )
+            ]
+            if len(room_member_keys) == 0:
+                # has the room been created so we can join it?
+                create_event = context.current_state.get(("m.room.create", ""))
+                if create_event:
+                    is_host_in_room = True
 
         if is_host_in_room:
             should_do_dance = False
-        elif room_host:
+        elif room_hosts:  # TODO: Shouldn't this be remote_room_host?
             should_do_dance = True
         else:
             # TODO(markjh): get prev_state from snapshot
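The `_do_join` hunks above and below replace the single `room_host` with a `room_hosts` candidate list and add a fallback: when no local user is in the room but an `m.room.create` event already exists in the current state, the server treats itself as being in the room and joins locally rather than federating. The decision logic, condensed into a standalone function with a plain dict standing in for the event context:

# Condensed join decision: local join, remote "dance", or failure.
# current_state maps (event_type, state_key) -> event.
def plan_join(is_host_in_room, current_state, room_hosts):
    if not is_host_in_room:
        # Is *anyone* in the room yet?
        members = [k for (k, _) in current_state if k == "m.room.member"]
        if not members and ("m.room.create", "") in current_state:
            # Freshly created room: safe to join locally.
            is_host_in_room = True

    if is_host_in_room:
        return ("local", None)
    elif room_hosts:
        return ("remote", room_hosts)  # try each candidate host in turn
    else:
        raise LookupError("No known servers")

print(plan_join(False, {("m.room.create", ""): {}}, []))  # local join
print(plan_join(False, {}, ["other.example.com"]))        # remote join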
@@ -440,17 +451,18 @@ class RoomMemberHandler(BaseHandler):
                 inviter = UserID.from_string(prev_state.user_id)
 
                 should_do_dance = not self.hs.is_mine(inviter)
-                room_host = inviter.domain
+                room_hosts = [inviter.domain]
             else:
-                should_do_dance = False
+                # return the same error as join_room_alias does
+                raise SynapseError(404, "No known servers")
 
         if should_do_dance:
             handler = self.hs.get_handlers().federation_handler
             yield handler.do_invite_join(
-                room_host,
+                room_hosts,
                 room_id,
                 event.user_id,
-                event.get_dict()["content"],  # FIXME To get a non-frozen dict
+                event.content,  # FIXME To get a non-frozen dict
                 context
             )
         else:
@@ -463,7 +475,7 @@ class RoomMemberHandler(BaseHandler):
                 do_auth=do_auth,
             )
 
-        user = self.hs.parse_userid(event.user_id)
+        user = UserID.from_string(event.user_id)
         yield self.distributor.fire(
             "user_joined_room", user=user, room_id=room_id
         )
@@ -495,12 +507,19 @@ class RoomMemberHandler(BaseHandler):
         defer.returnValue((is_remote_invite_join, room_host))
 
     @defer.inlineCallbacks
-    def get_rooms_for_user(self, user, membership_list=[Membership.JOIN]):
+    def get_joined_rooms_for_user(self, user):
         """Returns a list of roomids that the user has any of the given
         membership states in."""
-        rooms = yield self.store.get_rooms_for_user_where_membership_is(
-            user_id=user.to_string(), membership_list=membership_list
-        )
+
+        app_service = yield self.store.get_app_service_by_user_id(
+            user.to_string()
+        )
+        if app_service:
+            rooms = yield self.store.get_app_service_rooms(app_service)
+        else:
+            rooms = yield self.store.get_rooms_for_user(
+                user.to_string(),
+            )
 
         # For some reason the list of events contains duplicates
         # TODO(paul): work out why because I really don't think it should
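`get_joined_rooms_for_user` above and the `RoomEventSource` hunk below share a new pattern: first ask whether the user ID belongs to an application service, and if so answer from the rooms the service's namespaces cover rather than from the user's own membership table. Sketched with an invented in-memory store; the method names mirror the diff but the store itself is hypothetical:

# App-service-aware room lookup with a fake in-memory store.
class FakeStore:
    def __init__(self, as_users, as_rooms, user_rooms):
        self.as_users = as_users      # user_id -> app service name
        self.as_rooms = as_rooms      # app service name -> room IDs
        self.user_rooms = user_rooms  # user_id -> room IDs

    def get_app_service_by_user_id(self, user_id):
        return self.as_users.get(user_id)

    def get_app_service_rooms(self, service):
        return self.as_rooms[service]

    def get_rooms_for_user(self, user_id):
        return self.user_rooms.get(user_id, [])

def rooms_for(store, user_id):
    app_service = store.get_app_service_by_user_id(user_id)
    if app_service:
        # An AS-namespaced user sees the rooms its service covers.
        return store.get_app_service_rooms(app_service)
    return store.get_rooms_for_user(user_id)

store = FakeStore(
    as_users={"@irc_alice:hs": "irc"},
    as_rooms={"irc": ["!irc1:hs"]},
    user_rooms={"@bob:hs": ["!general:hs"]},
)
assert rooms_for(store, "@irc_alice:hs") == ["!irc1:hs"]
assert rooms_for(store, "@bob:hs") == ["!general:hs"]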
@@ -513,7 +532,7 @@ class RoomMemberHandler(BaseHandler):
                                     do_auth):
         yield run_on_reactor()
 
-        target_user = self.hs.parse_userid(event.state_key)
+        target_user = UserID.from_string(event.state_key)
 
         yield self.handle_new_client_event(
             event,
@@ -547,13 +566,24 @@ class RoomEventSource(object):
 
         to_key = yield self.get_current_key()
 
-        events, end_key = yield self.store.get_room_events_stream(
-            user_id=user.to_string(),
-            from_key=from_key,
-            to_key=to_key,
-            room_id=None,
-            limit=limit,
+        app_service = yield self.store.get_app_service_by_user_id(
+            user.to_string()
         )
+        if app_service:
+            events, end_key = yield self.store.get_appservice_room_stream(
+                service=app_service,
+                from_key=from_key,
+                to_key=to_key,
+                limit=limit,
+            )
+        else:
+            events, end_key = yield self.store.get_room_events_stream(
+                user_id=user.to_string(),
+                from_key=from_key,
+                to_key=to_key,
+                room_id=None,
+                limit=limit,
+            )
 
         defer.returnValue((events, end_key))
 
Some files were not shown because too many files have changed in this diff.