Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-11 01:40:27 +00:00.
Compare commits
2954 Commits
[Commit listing: 2954 rows of abbreviated commit SHA1s, from 88a973cde5 through 82be4457de. The Author, Date, and message columns of the table are empty in this mirror snapshot, so only the hashes survive.]

.gitignore (vendored): 5 additions

@@ -41,3 +41,8 @@ media_store/
 
 build/
 localhost-800*/
+static/client/register/register_config.js
+.tox
+
+env/
+*.config
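
The new ignore rules can be checked with git's standard ``check-ignore``
command once this version of the file is in place (a quick sketch; the paths
are made-up examples, and ``-v`` prints the matching pattern and .gitignore
line for each path)::

    git check-ignore -v .tox/log env/bin/python my-server.config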

AUTHORS.rst (new file): 56 additions

@@ -0,0 +1,56 @@
+Erik Johnston <erik at matrix.org>
+ * HS core
+ * Federation API impl
+
+Mark Haines <mark at matrix.org>
+ * HS core
+ * Crypto
+ * Content repository
+ * CS v2 API impl
+
+Kegan Dougal <kegan at matrix.org>
+ * HS core
+ * CS v1 API impl
+ * AS API impl
+
+Paul "LeoNerd" Evans <paul at matrix.org>
+ * HS core
+ * Presence
+ * Typing Notifications
+ * Performance metrics and caching layer
+
+Dave Baker <dave at matrix.org>
+ * Push notifications
+ * Auth CS v2 impl
+
+Matthew Hodgson <matthew at matrix.org>
+ * General doc & housekeeping
+ * Vertobot/vertobridge matrix<->verto PoC
+
+Emmanuel Rohee <manu at matrix.org>
+ * Supporting iOS clients (testability and fallback registration)
+
+Turned to Dust <dwinslow86 at gmail.com>
+ * ArchLinux installation instructions
+
+Brabo <brabo at riseup.net>
+ * Installation instruction fixes
+
+Ivan Shapovalov <intelfx100 at gmail.com>
+ * contrib/systemd: a sample systemd unit file and a logger configuration
+
+Eric Myhre <hash at exultant.us>
+ * Fix bug where ``media_store_path`` config option was ignored by v0 content
+   repository API.
+
+Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
+ * Add SAML2 support for registration and login.
+
+Steven Hammerton <steven.hammerton at openmarket.com>
+ * Add CAS support for registration and login.
+
+Mads Robin Christensen <mads at v42 dot dk>
+ * CentOS 7 installation instructions.
+
+Florent Violleau <floviolleau at gmail dot com>
+ * Add Raspberry Pi installation instructions and general troubleshooting items

CHANGES.rst: 569 additions

@@ -1,3 +1,572 @@
+Changes in synapse v0.13.2 (2016-02-11)
+=======================================
+
+* Fix bug where ``/events`` would fail to skip some events if there had been
+  more events than the limit specified since the last request (PR #570)
+
+Changes in synapse v0.13.1 (2016-02-10)
+=======================================
+
+* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
+  pull in the fix for SYWEB-361 so that the default client can display
+  HTML messages again(!)
+
+Changes in synapse v0.13.0 (2016-02-10)
+=======================================
+
+This version includes an upgrade of the schema, specifically adding an index to
+the ``events`` table. This may cause synapse to pause for several minutes the
+first time it is started after the upgrade.
+
+Changes:
+
+* Improve general performance (PR #540, #543, #544, #54, #549, #567)
+* Change guest user ids to be incrementing integers (PR #550)
+* Improve performance of public room list API (PR #552)
+* Change profile API to omit keys rather than return null (PR #557)
+* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/``
+  (PR #595)
+
+Bug fixes:
+
+* Fix bug with upgrading guest accounts where it would fail if you opened the
+  registration email on a different device (PR #547)
+* Fix bug where unread count could be wrong (PR #568)
+
+
+Changes in synapse v0.12.1-rc1 (2016-01-29)
+===========================================
+
+Features:
+
+* Add unread notification counts in ``/sync`` (PR #456)
+* Add support for inviting 3pids in ``/createRoom`` (PR #460)
+* Add ability for guest accounts to upgrade (PR #462)
+* Add ``/versions`` API (PR #468)
+* Add ``event`` to ``/context`` API (PR #492)
+* Add specific error code for invalid user names in ``/register`` (PR #499)
+* Add support for push badge counts (PR #507)
+* Add support for non-guest users to peek in rooms using ``/events`` (PR #510)
+
+Changes:
+
+* Change ``/sync`` so that guest users only get rooms they've joined (PR #469)
+* Change to require unbanning before other membership changes (PR #501)
+* Change default push rules to notify for all messages (PR #486)
+* Change default push rules to not notify on membership changes (PR #514)
+* Change default push rules in one to one rooms to only notify for events that
+  are messages (PR #529)
+* Change ``/sync`` to reject requests with a ``from`` query param (PR #512)
+* Change server manhole to use SSH rather than telnet (PR #473)
+* Change server to require AS users to be registered before use (PR #487)
+* Change server not to start when ASes are invalidly configured (PR #494)
+* Change server to require ID and ``as_token`` to be unique for AS's (PR #496)
+* Change maximum pagination limit to 1000 (PR #497)
+
+Bug fixes:
+
+* Fix bug where ``/sync`` didn't return when something under the leave key
+  changed (PR #461)
+* Fix bug where we returned smaller rather than larger than requested
+  thumbnails when ``method=crop`` (PR #464)
+* Fix thumbnails API to only return cropped thumbnails when asking for a
+  cropped thumbnail (PR #475)
+* Fix bug where we occasionally still logged access tokens (PR #477)
+* Fix bug where ``/events`` would always return immediately for guest users
+  (PR #480)
+* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481)
+* Fix enabling and disabling push rules (PR #498)
+* Fix bug where ``/register`` returned 500 when given unicode username
+  (PR #513)
+
+Changes in synapse v0.12.0 (2016-01-04)
+=======================================
+
+* Expose ``/login`` under ``r0`` (PR #459)
+
+Changes in synapse v0.12.0-rc3 (2015-12-23)
+===========================================
+
+* Allow guest accounts access to ``/sync`` (PR #455)
+* Allow filters to include/exclude rooms at the room level
+  rather than just from the components of the sync for each
+  room. (PR #454)
+* Include urls for room avatars in the response to ``/publicRooms`` (PR #453)
+* Don't set an identicon as the avatar for a user when they register (PR #450)
+* Add a ``display_name`` to third-party invites (PR #449)
+* Send more information to the identity server for third-party invites so that
+  it can send richer messages to the invitee (PR #446)
+* Cache the responses to ``/initialSync`` for 5 minutes. If a client
+  retries a request to ``/initialSync`` before a response was computed
+  to the first request, then the same response is used for both requests
+  (PR #457)
+* Fix a bug where synapse would always request the signing keys of
+  remote servers even when the key was cached locally (PR #452)
+* Fix 500 when paginating search results (PR #447)
+* Fix a bug where synapse was leaking raw email address in third-party invites
+  (PR #448)
+
+Changes in synapse v0.12.0-rc2 (2015-12-14)
+===========================================
+
+* Add caches for whether rooms have been forgotten by a user (PR #434)
+* Remove instructions to use ``--process-dependency-link`` since all of the
+  dependencies of synapse are on PyPI (PR #436)
+* Parallelise the processing of ``/sync`` requests (PR #437)
+* Fix race updating presence in ``/events`` (PR #444)
+* Fix bug back-populating search results (PR #441)
+* Fix bug calculating state in ``/sync`` requests (PR #442)
+
+Changes in synapse v0.12.0-rc1 (2015-12-10)
+===========================================
+
+* Host the client APIs released as r0 by
+  https://matrix.org/docs/spec/r0.0.0/client_server.html
+  on paths prefixed by ``/_matrix/client/r0``. (PR #430, PR #415, PR #400)
+* Updates the client APIs to match r0 of the matrix specification.
+
+  * All APIs return events in the new event format, old APIs also include
+    the fields needed to parse the event using the old format for
+    compatibility. (PR #402)
+  * Search results are now given as a JSON array rather than
+    a JSON object (PR #405)
+  * Miscellaneous changes to search (PR #403, PR #406, PR #412)
+  * Filter JSON objects may now be passed as query parameters to ``/sync``
+    (PR #431)
+  * Fix implementation of ``/admin/whois`` (PR #418)
+  * Only include the rooms that user has left in ``/sync`` if the client
+    requests them in the filter (PR #423)
+  * Don't push for ``m.room.message`` by default (PR #411)
+  * Add API for setting per account user data (PR #392)
+  * Allow users to forget rooms (PR #385)
+
+* Performance improvements and monitoring:
+
+  * Add per-request counters for CPU time spent on the main python thread.
+    (PR #421, PR #420)
+  * Add per-request counters for time spent in the database (PR #429)
+  * Make state updates in the C+S API idempotent (PR #416)
+  * Only fire ``user_joined_room`` if the user has actually joined. (PR #410)
+  * Reuse a single http client, rather than creating new ones (PR #413)
+
+* Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)
+
+Changes in synapse v0.11.1 (2015-11-20)
+=======================================
+
+* Add extra options to search API (PR #394)
+* Fix bug where we did not correctly cap federation retry timers. This meant it
+  could take several hours for servers to start talking to resurrected servers,
+  even when they were receiving traffic from them (PR #393)
+* Don't advertise login token flow unless CAS is enabled. This caused issues
+  where some clients would always use the fallback API if they did not
+  recognize all login flows (PR #391)
+* Change /v2 sync API to rename ``private_user_data`` to ``account_data``
+  (PR #386)
+* Change /v2 sync API to remove the ``event_map`` and rename keys in ``rooms``
+  object (PR #389)
+
+Changes in synapse v0.11.0-r2 (2015-11-19)
+==========================================
+
+* Fix bug in database port script (PR #387)
+
+Changes in synapse v0.11.0-r1 (2015-11-18)
+==========================================
+
+* Retry and fail federation requests more aggressively for requests that block
+  client side requests (PR #384)
+
+Changes in synapse v0.11.0 (2015-11-17)
+=======================================
+
+* Change CAS login API (PR #349)
+
+Changes in synapse v0.11.0-rc2 (2015-11-13)
+===========================================
+
+* Various changes to /sync API response format (PR #373)
+* Fix regression when setting display name in newly joined room over
+  federation (PR #368)
+* Fix problem where /search was slow when using SQLite (PR #366)
+
+Changes in synapse v0.11.0-rc1 (2015-11-11)
+===========================================
+
+* Add Search API (PR #307, #324, #327, #336, #350, #359)
+* Add 'archived' state to v2 /sync API (PR #316)
+* Add ability to reject invites (PR #317)
+* Add config option to disable password login (PR #322)
+* Add the login fallback API (PR #330)
+* Add room context API (PR #334)
+* Add room tagging support (PR #335)
+* Update v2 /sync API to match spec (PR #305, #316, #321, #332, #337, #341)
+* Change retry schedule for application services (PR #320)
+* Change retry schedule for remote servers (PR #340)
+* Fix bug where we hosted static content in the incorrect place (PR #329)
+* Fix bug where we didn't increment retry interval for remote servers (PR #343)
+
+Changes in synapse v0.10.1-rc1 (2015-10-15)
+===========================================
+
+* Add support for CAS, thanks to Steven Hammerton (PR #295, #296)
+* Add support for using macaroons for ``access_token`` (PR #256, #229)
+* Add support for ``m.room.canonical_alias`` (PR #287)
+* Add support for viewing the history of rooms that users have left. (PR #276,
+  #294)
+* Add support for refresh tokens (PR #240)
+* Add flag on creation which disables federation of the room (PR #279)
+* Add some room state to invites. (PR #275)
+* Atomically persist events when joining a room over federation (PR #283)
+* Change default history visibility for private rooms (PR #271)
+* Allow users to redact their own sent events (PR #262)
+* Use tox for tests (PR #247)
+* Split up syutil into separate libraries (PR #243)
+
+Changes in synapse v0.10.0-r2 (2015-09-16)
+==========================================
+
+* Fix bug where we always fetched remote server signing keys instead of using
+  ones in our cache.
+* Fix adding threepids to an existing account.
+* Fix bug with inviting over federation where remote server was already in
+  the room. (PR #281, SYN-392)
+
+Changes in synapse v0.10.0-r1 (2015-09-08)
+==========================================
+
+* Fix bug with python packaging
+
+Changes in synapse v0.10.0 (2015-09-03)
+=======================================
+
+No change from release candidate.
+
+Changes in synapse v0.10.0-rc6 (2015-09-02)
+===========================================
+
+* Remove some of the old database upgrade scripts.
+* Fix database port script to work with newly created sqlite databases.
+
+Changes in synapse v0.10.0-rc5 (2015-08-27)
+===========================================
+
+* Fix bug that broke downloading files with ascii filenames across federation.
+
+Changes in synapse v0.10.0-rc4 (2015-08-27)
+===========================================
+
+* Allow UTF-8 filenames for upload. (PR #259)
+
+Changes in synapse v0.10.0-rc3 (2015-08-25)
+===========================================
+
+* Add ``--keys-directory`` config option to specify where files such as
+  certs and signing keys should be stored in, when using ``--generate-config``
+  or ``--generate-keys``. (PR #250)
+* Allow ``--config-path`` to specify a directory, causing synapse to use all
+  \*.yaml files in the directory as config files. (PR #249)
+* Add ``web_client_location`` config option to specify static files to be
+  hosted by synapse under ``/_matrix/client``. (PR #245)
+* Add helper utility to synapse to read and parse the config files and extract
+  the value of a given key. For example::
+
+      $ python -m synapse.config read server_name -c homeserver.yaml
+      localhost
+
+  (PR #246)
+
+
+Changes in synapse v0.10.0-rc2 (2015-08-24)
+===========================================
+
+* Fix bug where we incorrectly populated the ``event_forward_extremities``
+  table, resulting in problems joining large remote rooms (e.g.
+  ``#matrix:matrix.org``)
+* Reduce the number of times we wake up pushers by not listening for presence
+  or typing events, reducing the CPU cost of each pusher.
+
+
+Changes in synapse v0.10.0-rc1 (2015-08-21)
+===========================================
+
+Also see v0.9.4-rc1 changelog, which has been amalgamated into this release.
+
+General:
+
+* Upgrade to Twisted 15 (PR #173)
+* Add support for serving and fetching encryption keys over federation.
+  (PR #208)
+* Add support for logging in with email address (PR #234)
+* Add support for new ``m.room.canonical_alias`` event. (PR #233)
+* Change synapse to treat user IDs case insensitively during registration and
+  login. (If two users already exist with case insensitive matching user ids,
+  synapse will continue to require them to specify their user ids exactly.)
+* Error if a user tries to register with an email already in use. (PR #211)
+* Add extra and improve existing caches (PR #212, #219, #226, #228)
+* Batch various storage requests (PR #226, #228)
+* Fix bug where we didn't correctly log the entity that triggered the request
+  if the request came in via an application service (PR #230)
+* Fix bug where we needlessly regenerated the full list of rooms an AS is
+  interested in. (PR #232)
+* Add support for AS's to use v2_alpha registration API (PR #210)
+
+
+Configuration:
+
+* Add ``--generate-keys`` that will generate any missing cert and key files in
+  the configuration files. This is equivalent to running ``--generate-config``
+  on an existing configuration file. (PR #220)
+* ``--generate-config`` now no longer requires a ``--server-name`` parameter
+  when used on existing configuration files. (PR #220)
+* Add ``--print-pidfile`` flag that controls the printing of the pid to stdout
+  of the daemonised process. (PR #213)
+
+Media Repository:
+
+* Fix bug where we picked a lower resolution image than requested. (PR #205)
+* Add support for specifying if the media repository should dynamically
+  thumbnail images or not. (PR #206)
+
+Metrics:
+
+* Add statistics from the reactor to the metrics API. (PR #224, #225)
+
+Demo Homeservers:
+
+* Fix starting the demo homeservers without rate-limiting enabled. (PR #182)
+* Fix enabling registration on demo homeservers (PR #223)
+
+
+Changes in synapse v0.9.4-rc1 (2015-07-21)
+==========================================
+
+General:
+
+* Add basic implementation of receipts. (SPEC-99)
+* Add support for configuration presets in room creation API. (PR #203)
+* Add auth event that limits the visibility of history for new users.
+  (SPEC-134)
+* Add SAML2 login/registration support. (PR #201. Thanks Muthu Subramanian!)
+* Add client side key management APIs for end to end encryption. (PR #198)
+* Change power level semantics so that you cannot kick, ban or change power
+  levels of users that have equal or greater power level than you. (SYN-192)
+* Improve performance by bulk inserting events where possible. (PR #193)
+* Improve performance by bulk verifying signatures where possible. (PR #194)
+
+
+Configuration:
+
+* Add support for including TLS certificate chains.
+
+Media Repository:
+
+* Add Content-Disposition headers to content repository responses. (SYN-150)
+
+
+Changes in synapse v0.9.3 (2015-07-01)
+======================================
+
+No changes from v0.9.3 Release Candidate 1.
+
+Changes in synapse v0.9.3-rc1 (2015-06-23)
+==========================================
+
+General:
+
+* Fix a memory leak in the notifier. (SYN-412)
+* Improve performance of room initial sync. (SYN-418)
+* General improvements to logging.
+* Remove ``access_token`` query params from ``INFO`` level logging.
+
+Configuration:
+
+* Add support for specifying and configuring multiple listeners. (SYN-389)
+
+Application services:
+
+* Fix bug where synapse failed to send user queries to application services.
+
+Changes in synapse v0.9.2-r2 (2015-06-15)
+=========================================
+
+Fix packaging so that schema delta python files get included in the package.
+
+Changes in synapse v0.9.2 (2015-06-12)
+======================================
+
+General:
+
+* Use ultrajson for json (de)serialisation when a canonical encoding is not
+  required. Ultrajson is significantly faster than simplejson in certain
+  circumstances.
+* Use connection pools for outgoing HTTP connections.
+* Process thumbnails on separate threads.
+
+Configuration:
+
+* Add option, ``gzip_responses``, to disable HTTP response compression.
+
+Federation:
+
+* Improve resilience of backfill by ensuring we fetch any missing auth events.
+* Improve performance of backfill and joining remote rooms by removing
+  unnecessary computations. This included handling events we'd previously
+  handled as well as attempting to compute the current state for outliers.
+
+
+Changes in synapse v0.9.1 (2015-05-26)
+======================================
+
+General:
+
+* Add support for backfilling when a client paginates. This allows servers to
+  request history for a room from remote servers when a client tries to
+  paginate history the server does not have - SYN-36
+* Fix bug where you couldn't disable non-default pushrules - SYN-378
+* Fix ``register_new_user`` script - SYN-359
+* Improve performance of fetching events from the database, this improves both
+  initialSync and sending of events.
+* Improve performance of event streams, allowing synapse to handle more
+  simultaneous connected clients.
+
+Federation:
+
+* Fix bug with existing backfill implementation where it returned the wrong
+  selection of events in some circumstances.
+* Improve performance of joining remote rooms.
+
+Configuration:
+
+* Add support for changing the bind host of the metrics listener via the
+  ``metrics_bind_host`` option.
+
+
+Changes in synapse v0.9.0-r5 (2015-05-21)
+=========================================
+
+* Add more database caches to reduce amount of work done for each pusher. This
+  radically reduces CPU usage when multiple pushers are set up in the same room.
+
+Changes in synapse v0.9.0 (2015-05-07)
+======================================
+
+General:
+
+* Add support for using a PostgreSQL database instead of SQLite. See
+  `docs/postgres.rst`_ for details.
+* Add password change and reset APIs. See `Registration`_ in the spec.
+* Fix memory leak due to not releasing stale notifiers - SYN-339.
+* Fix race in caches that occasionally caused some presence updates to be
+  dropped - SYN-369.
+* Check server name has not changed on restart.
+* Add a sample systemd unit file and a logger configuration in
+  contrib/systemd. Contributed by Ivan Shapovalov.
+
+Federation:
+
+* Add key distribution mechanisms for fetching public keys of unavailable
+  remote home servers. See `Retrieving Server Keys`_ in the spec.
+
+Configuration:
+
+* Add support for multiple config files.
+* Add support for dictionaries in config files.
+* Remove support for specifying config options on the command line, except
+  for:
+
+  * ``--daemonize`` - Daemonize the home server.
+  * ``--manhole`` - Turn on the twisted telnet manhole service on the given
+    port.
+  * ``--database-path`` - The path to a sqlite database to use.
+  * ``--verbose`` - The verbosity level.
+  * ``--log-file`` - File to log to.
+  * ``--log-config`` - Python logging config file.
+  * ``--enable-registration`` - Enable registration for new users.
+
+Application services:
+
+* Reliably retry sending of events from Synapse to application services, as per
+  `Application Services`_ spec.
+* Application services can no longer register via the ``/register`` API,
+  instead their configuration should be saved to a file and listed in the
+  synapse ``app_service_config_files`` config option. The AS configuration file
+  has the same format as the old ``/register`` request.
+  See `docs/application_services.rst`_ for more information.
+
+.. _`docs/postgres.rst`: docs/postgres.rst
+.. _`docs/application_services.rst`: docs/application_services.rst
+.. _`Registration`: https://github.com/matrix-org/matrix-doc/blob/master/specification/10_client_server_api.rst#registration
+.. _`Retrieving Server Keys`: https://github.com/matrix-org/matrix-doc/blob/6f2698/specification/30_server_server_api.rst#retrieving-server-keys
+.. _`Application Services`: https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api
+
+Changes in synapse v0.8.1 (2015-03-18)
+======================================
+
+* Disable registration by default. New users can be added using the command
+  ``register_new_matrix_user`` or by enabling registration in the config.
+* Add metrics to synapse. To enable metrics use config options
+  ``enable_metrics`` and ``metrics_port``.
+* Fix bug where banning only kicked the user.
+
+Changes in synapse v0.8.0 (2015-03-06)
+======================================
+
+General:
+
+* Add support for registration fallback. This is a page hosted on the server
+  which allows a user to register for an account, regardless of what client
+  they are using (e.g. mobile devices).
+
+* Added new default push rules and made them configurable by clients:
+
+  * Suppress all notice messages.
+  * Notify when invited to a new room.
+  * Notify for messages that don't match any rule.
+  * Notify on incoming call.
+
+Federation:
+
+* Added per host server side rate-limiting of incoming federation requests.
+* Added a ``/get_missing_events/`` API to federation to reduce number of
+  ``/events/`` requests.
+
+Configuration:
+
+* Added configuration option to disable registration:
+  ``disable_registration``.
+* Added configuration option to change soft limit of number of open file
+  descriptors: ``soft_file_limit``.
+* Make ``tls_private_key_path`` optional when running with ``no_tls``.
+
+Application services:
+
+* Application services can now poll on the CS API ``/events`` for their events,
+  by providing their application service ``access_token``.
+* Added exclusive namespace support to application services API.
+
+
+Changes in synapse v0.7.1 (2015-02-19)
+======================================
+
+* Initial alpha implementation of parts of the Application Services API.
+  Including:
+
+  - AS Registration / Unregistration
+  - User Query API
+  - Room Alias Query API
+  - Push transport for receiving events.
+  - User/Alias namespace admin control
+
+* Add cache when fetching events from remote servers to stop repeatedly
+  fetching events with bad signatures.
+* Respect the per remote server retry scheme when fetching both events and
+  server keys to reduce the number of times we send requests to dead servers.
+* Inform remote servers when the local server fails to handle a received event.
+* Turn off python bytecode generation due to problems experienced when
+  upgrading from previous versions.
+
 Changes in synapse v0.7.0 (2015-02-12)
 ======================================
 
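
Several of the options referenced in the changelog above
(``enable_registration``, ``enable_metrics``, ``metrics_port``,
``soft_file_limit``, ``app_service_config_files``) live in
``homeserver.yaml``. A minimal illustrative fragment follows; the values are
placeholders rather than recommended settings::

    enable_registration: false
    enable_metrics: true
    metrics_port: 9092
    soft_file_limit: 0
    app_service_config_files:
      - /etc/synapse/my-appservice.yaml

The ``python -m synapse.config read`` helper shown under v0.10.0-rc3 can then
be used to check what value a running config will actually resolve for any of
these keys.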

CONTRIBUTING.rst (new file): 118 additions

@@ -0,0 +1,118 @@
+Contributing code to Matrix
+===========================
+
+Everyone is welcome to contribute code to Matrix
+(https://github.com/matrix-org), provided that they are willing to license
+their contributions under the same license as the project itself. We follow a
+simple 'inbound=outbound' model for contributions: the act of submitting an
+'inbound' contribution means that the contributor agrees to license the code
+under the same terms as the project's overall 'outbound' license - in our
+case, this is almost always Apache Software License v2 (see LICENSE).
+
+How to contribute
+~~~~~~~~~~~~~~~~~
+
+The preferred and easiest way to contribute changes to Matrix is to fork the
+relevant project on github, and then create a pull request to ask us to pull
+your changes into our repo
+(https://help.github.com/articles/using-pull-requests/)
+
+**The single biggest thing you need to know is: please base your changes on
+the develop branch - /not/ master.**
+
+We use the master branch to track the most recent release, so that folks who
+blindly clone the repo and automatically check out master get something that
+works. Develop is the unstable branch where all the development actually
+happens: the workflow is that contributors should fork the develop branch to
+make a 'feature' branch for a particular contribution, and then make a pull
+request to merge this back into the matrix.org 'official' develop branch. We
+use github's pull request workflow to review the contribution, and either ask
+you to make any refinements needed or merge it and make them ourselves. The
+changes will then land on master when we next do a release.
+
+We use Jenkins for continuous integration (http://matrix.org/jenkins), and
+typically all pull requests get automatically tested by Jenkins: if your
+change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org
+so please lurk there and keep an eye open.
+
+Code style
+~~~~~~~~~~
+
+All Matrix projects have a well-defined code-style - and sometimes we've even
+got as far as documenting it... For instance, synapse's code style doc lives
+at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.
+
+Please ensure your changes match the cosmetic style of the existing project,
+and **never** mix cosmetic and functional changes in the same commit, as it
+makes it horribly hard to review otherwise.
+
+Attribution
+~~~~~~~~~~~
+
+Everyone who contributes anything to Matrix is welcome to be listed in the
+AUTHORS.rst file for the project in question. Please feel free to include a
+change to AUTHORS.rst in your pull request to list yourself and a short
+description of the area(s) you've worked on. Also, we sometimes have swag to
+give away to contributors - if you feel that Matrix-branded apparel is missing
+from your life, please mail us your shipping address to matrix at matrix.org
+and we'll try to fix it :)
+
+Sign off
+~~~~~~~~
+
+In order to have a concrete record that your contribution is intentional and
+you agree to license it under the same terms as the project's license, we've
+adopted the same lightweight approach that the Linux Kernel
+(https://www.kernel.org/doc/Documentation/SubmittingPatches), Docker
+(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
+projects use: the DCO (Developer Certificate of Origin:
+http://developercertificate.org/). This is a simple declaration that you wrote
+the contribution or otherwise have the right to contribute it to Matrix::
+
+    Developer Certificate of Origin
+    Version 1.1
+
+    Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+    660 York Street, Suite 102,
+    San Francisco, CA 94110 USA
+
+    Everyone is permitted to copy and distribute verbatim copies of this
+    license document, but changing it is not allowed.
+
+    Developer's Certificate of Origin 1.1
+
+    By making a contribution to this project, I certify that:
+
+    (a) The contribution was created in whole or in part by me and I
+        have the right to submit it under the open source license
+        indicated in the file; or
+
+    (b) The contribution is based upon previous work that, to the best
+        of my knowledge, is covered under an appropriate open source
+        license and I have the right under that license to submit that
+        work with modifications, whether created in whole or in part
+        by me, under the same open source license (unless I am
+        permitted to submit under a different license), as indicated
+        in the file; or
+
+    (c) The contribution was provided directly to me by some other
+        person who certified (a), (b) or (c) and I have not modified
+        it.
+
+    (d) I understand and agree that this project and the contribution
+        are public and that a record of the contribution (including all
+        personal information I submit with it, including my sign-off) is
+        maintained indefinitely and may be redistributed consistent with
+        this project or the open source license(s) involved.
+
+If you agree to this for your contribution, then all that's needed is to
+include the line in your commit or pull request comment::
+
+    Signed-off-by: Your Name <your@email.example.org>
+
+...using your real name; unfortunately pseudonyms and anonymous contributions
+can't be accepted. Git makes this trivial - just use the -s flag when you do
+``git commit``, having first set ``user.name`` and ``user.email`` git configs
+(which you should have done anyway :)
+
+Conclusion
+~~~~~~~~~~
+
+That's it! Matrix is a very open and collaborative project as you might expect
+given our obsession with open communication. If we're going to successfully
+matrix together all the fragmented communication technologies out there we are
+reliant on contributions and collaboration from the community to do so. So
+please get involved - and we hope you have as much fun hacking on Matrix as we
+do!
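
In practice, the sign-off workflow described above boils down to the following
(the name, email and branch name are placeholders)::

    git config user.name "Your Name"
    git config user.email your@email.example.org
    git checkout -b my-feature origin/develop
    # ...hack hack hack...
    git commit -s -m "Describe the change"

The ``-s`` flag appends the ``Signed-off-by:`` line automatically from the
configured name and email.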

MANIFEST.in: 17 changes

@@ -3,12 +3,23 @@ include LICENSE
 include VERSION
 include *.rst
 include demo/README
+include demo/demo.tls.dh
+include demo/*.py
+include demo/*.sh
 
 recursive-include synapse/storage/schema *.sql
+recursive-include synapse/storage/schema *.py
 
-recursive-include demo *.dh
-recursive-include demo *.py
-recursive-include demo *.sh
 recursive-include docs *
 recursive-include scripts *
+recursive-include scripts-dev *
 recursive-include tests *.py
+
+recursive-include synapse/static *.css
+recursive-include synapse/static *.gif
+recursive-include synapse/static *.html
+recursive-include synapse/static *.js
+
+exclude jenkins.sh
+
+prune demo/etc
|||||||
378
README.rst
378
README.rst
@@ -1,3 +1,5 @@
|
|||||||
|
.. contents::
|
||||||
|
|
||||||
Introduction
|
Introduction
|
||||||
============
|
============
|
||||||
|
|
||||||
@@ -5,7 +7,7 @@ Matrix is an ambitious new ecosystem for open federated Instant Messaging and
|
|||||||
VoIP. The basics you need to know to get up and running are:
|
VoIP. The basics you need to know to get up and running are:
|
||||||
|
|
||||||
- Everything in Matrix happens in a room. Rooms are distributed and do not
|
- Everything in Matrix happens in a room. Rooms are distributed and do not
|
||||||
exist on any single server. Rooms can be located using convenience aliases
|
exist on any single server. Rooms can be located using convenience aliases
|
||||||
like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
|
like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
|
||||||
|
|
||||||
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
|
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
|
||||||
@@ -18,10 +20,10 @@ The overall architecture is::
|
|||||||
https://somewhere.org/_matrix https://elsewhere.net/_matrix
|
https://somewhere.org/_matrix https://elsewhere.net/_matrix
|
||||||
|
|
||||||
``#matrix:matrix.org`` is the official support room for Matrix, and can be
|
``#matrix:matrix.org`` is the official support room for Matrix, and can be
|
||||||
accessed by the web client at http://matrix.org/alpha or via an IRC bridge at
|
accessed by any client from https://matrix.org/blog/try-matrix-now or via IRC
|
||||||
irc://irc.freenode.net/matrix.
|
bridge at irc://irc.freenode.net/matrix.
|
||||||
|
|
||||||
Synapse is currently in rapid development, but as of version 0.5 we believe it
|
Synapse is currently in rapid development, but as of version 0.5 we believe it
|
||||||
is sufficiently stable to be run as an internet-facing service for real usage!
|
is sufficiently stable to be run as an internet-facing service for real usage!
|
||||||
|
|
||||||
About Matrix
|
About Matrix
|
||||||
@@ -67,25 +69,32 @@ Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
|
|||||||
web client demo implemented in AngularJS) and cmdclient (a basic Python
|
web client demo implemented in AngularJS) and cmdclient (a basic Python
|
||||||
command line utility which lets you easily see what the JSON APIs are up to).
|
command line utility which lets you easily see what the JSON APIs are up to).
|
||||||
|
|
||||||
Meanwhile, iOS and Android SDKs and clients are currently in development and available from:
|
Meanwhile, iOS and Android SDKs and clients are available from:
|
||||||
|
|
||||||
- https://github.com/matrix-org/matrix-ios-sdk
|
- https://github.com/matrix-org/matrix-ios-sdk
|
||||||
|
- https://github.com/matrix-org/matrix-ios-kit
|
||||||
|
- https://github.com/matrix-org/matrix-ios-console
|
||||||
- https://github.com/matrix-org/matrix-android-sdk
|
- https://github.com/matrix-org/matrix-android-sdk
|
||||||
|
|
||||||
We'd like to invite you to join #matrix:matrix.org (via http://matrix.org/alpha), run a homeserver, take a look at the Matrix spec at
|
We'd like to invite you to join #matrix:matrix.org (via
|
||||||
http://matrix.org/docs/spec, experiment with the APIs and the demo
|
https://matrix.org/blog/try-matrix-now), run a homeserver, take a look at the
|
||||||
clients, and report any bugs via http://matrix.org/jira.
|
Matrix spec at https://matrix.org/docs/spec and API docs at
|
||||||
|
https://matrix.org/docs/api, experiment with the APIs and the demo clients, and
|
||||||
|
report any bugs via https://matrix.org/jira.
|
||||||
|
|
||||||
Thanks for using Matrix!
|
Thanks for using Matrix!
|
||||||
|
|
||||||
[1] End-to-end encryption is currently in development
|
[1] End-to-end encryption is currently in development - see https://matrix.org/git/olm
|
||||||
|
|
||||||
Homeserver Installation
|
Synapse Installation
|
||||||
=======================
|
====================
|
||||||
|
|
||||||
|
Synapse is the reference python/twisted Matrix homeserver implementation.
|
||||||
|
|
||||||
System requirements:
|
System requirements:
|
||||||
- POSIX-compliant system (tested on Linux & OSX)
|
- POSIX-compliant system (tested on Linux & OS X)
|
||||||
- Python 2.7
|
- Python 2.7
|
||||||
|
- At least 512 MB RAM.
|
||||||
|
|
||||||
Synapse is written in python but some of the libraries is uses are written in
|
Synapse is written in python but some of the libraries is uses are written in
|
||||||
C. So before we can install synapse itself we need a working C compiler and the
|
C. So before we can install synapse itself we need a working C compiler and the
|
||||||
@@ -93,115 +102,197 @@ header files for python C extensions.
|
|||||||
|
|
||||||
Installing prerequisites on Ubuntu or Debian::
|
Installing prerequisites on Ubuntu or Debian::
|
||||||
|
|
||||||
$ sudo apt-get install build-essential python2.7-dev libffi-dev \
|
sudo apt-get install build-essential python2.7-dev libffi-dev \
|
||||||
python-pip python-setuptools sqlite3 \
|
python-pip python-setuptools sqlite3 \
|
||||||
libssl-dev python-virtualenv libjpeg-dev
|
libssl-dev python-virtualenv libjpeg-dev
|
||||||
|
|
||||||
Installing prerequisites on ArchLinux::
|
Installing prerequisites on ArchLinux::
|
||||||
|
|
||||||
$ sudo pacman -S base-devel python2 python-pip \
|
sudo pacman -S base-devel python2 python-pip \
|
||||||
python-setuptools python-virtualenv sqlite3
|
python-setuptools python-virtualenv sqlite3
|
||||||
|
|
||||||
|
Installing prerequisites on CentOS 7::
|
||||||
|
|
||||||
|
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
||||||
|
lcms2-devel libwebp-devel tcl-devel tk-devel \
|
||||||
|
python-virtualenv libffi-devel openssl-devel
|
||||||
|
sudo yum groupinstall "Development Tools"
|
||||||
|
|
||||||
|
|
||||||
Installing prerequisites on Mac OS X::
|
Installing prerequisites on Mac OS X::
|
||||||
|
|
||||||
$ xcode-select --install
|
xcode-select --install
|
||||||
$ sudo pip install virtualenv
|
sudo easy_install pip
|
||||||
|
sudo pip install virtualenv
|
||||||
|
|
||||||
|
Installing prerequisites on Raspbian::
|
||||||
|
|
||||||
|
sudo apt-get install build-essential python2.7-dev libffi-dev \
|
||||||
|
python-pip python-setuptools sqlite3 \
|
||||||
|
libssl-dev python-virtualenv libjpeg-dev
|
||||||
|
sudo pip install --upgrade pip
|
||||||
|
sudo pip install --upgrade ndg-httpsclient
|
||||||
|
sudo pip install --upgrade virtualenv
|
||||||
|
|
||||||
To install the synapse homeserver run::
|
To install the synapse homeserver run::
|
||||||
|
|
||||||
$ virtualenv ~/.synapse
|
virtualenv -p python2.7 ~/.synapse
|
||||||
$ source ~/.synapse/bin/activate
|
source ~/.synapse/bin/activate
|
||||||
$ pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
|
pip install --upgrade setuptools
|
||||||
|
pip install https://github.com/matrix-org/synapse/tarball/master
|
||||||
|
|
||||||
This installs synapse, along with the libraries it uses, into a virtual
|
This installs synapse, along with the libraries it uses, into a virtual
|
||||||
environment under ``~/.synapse``.
|
environment under ``~/.synapse``. Feel free to pick a different directory
|
||||||
|
if you prefer.
|
||||||
|
|
||||||
|
In case of problems, please see the _Troubleshooting section below.
|
||||||
|
|
||||||
|
Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
|
||||||
|
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
|
||||||
|
|
||||||
|
Another alternative is to install via apt from http://matrix.org/packages/debian/.
|
||||||
|
Note that these packages do not include a client - choose one from
|
||||||
|
https://matrix.org/blog/try-matrix-now/ (or build your own with
|
||||||
|
https://github.com/matrix-org/matrix-js-sdk/).
|
||||||
|
|
||||||
|
Finally, Martin Giess has created an auto-deployment process with vagrant/ansible,
|
||||||
|
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
|
||||||
|
for details.
|
||||||
|
|
||||||
To set up your homeserver, run (in your virtualenv, as before)::
|
To set up your homeserver, run (in your virtualenv, as before)::
|
||||||
|
|
||||||
$ python -m synapse.app.homeserver \
|
cd ~/.synapse
|
||||||
|
python -m synapse.app.homeserver \
|
||||||
--server-name machine.my.domain.name \
|
--server-name machine.my.domain.name \
|
||||||
--config-path homeserver.yaml \
|
--config-path homeserver.yaml \
|
||||||
--generate-config
|
--generate-config \
|
||||||
|
--report-stats=[yes|no]
|
||||||
|
|
||||||
Substituting your host and domain name as appropriate.
|
...substituting your host and domain name as appropriate.
|
||||||
|
|
||||||
|
This will generate you a config file that you can then customise, but it will
|
||||||
|
also generate a set of keys for you. These keys will allow your Home Server to
|
||||||
|
identify itself to other Home Servers, so don't lose or delete them. It would be
|
||||||
|
wise to back them up somewhere safe. If, for whatever reason, you do need to
|
||||||
|
change your Home Server's keys, you may find that other Home Servers have the
|
||||||
|
old key cached. If you update the signing key, you should change the name of the
|
||||||
|
key in the <server name>.signing.key file (the second word) to something different.
|
||||||
|
|
||||||
|
By default, registration of new users is disabled. You can either enable
|
||||||
|
registration in the config by specifying ``enable_registration: true``
|
||||||
|
(it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
|
||||||
|
you can use the command line to register new users::
|
||||||
|
|
||||||
|
$ source ~/.synapse/bin/activate
|
||||||
|
$ synctl start # if not already running
|
||||||
|
$ register_new_matrix_user -c homeserver.yaml https://localhost:8448
|
||||||
|
New user localpart: erikj
|
||||||
|
Password:
|
||||||
|
Confirm password:
|
||||||
|
Success!
|
||||||
|
|
||||||
For reliable VoIP calls to be routed via this homeserver, you MUST configure
|
For reliable VoIP calls to be routed via this homeserver, you MUST configure
|
||||||
a TURN server. See docs/turn-howto.rst for details.
|
a TURN server. See docs/turn-howto.rst for details.
|
||||||
|
|
||||||
Troubleshooting Installation
|
Running Synapse
|
||||||
----------------------------
|
===============
|
||||||
|
|
||||||
Synapse requires pip 1.7 or later, so if your OS provides too old a version and
|
To actually run your new homeserver, pick a working directory for Synapse to
|
||||||
you get errors about ``error: no such option: --process-dependency-links`` you
|
run (e.g. ``~/.synapse``), and::
|
||||||
may need to manually upgrade it::
|
|
||||||
|
|
||||||
$ sudo pip install --upgrade pip
|
cd ~/.synapse
|
||||||
|
source ./bin/activate
|
||||||
|
synctl start
|
||||||
|
|
||||||
If pip crashes mid-installation for reason (e.g. lost terminal), pip may
|
Using PostgreSQL
|
||||||
refuse to run until you remove the temporary installation directory it
|
================
|
||||||
created. To reset the installation::
|
|
||||||
|
|
||||||
$ rm -rf /tmp/pip_install_matrix
|
As of Synapse 0.9, `PostgreSQL <http://www.postgresql.org>`_ is supported as an
|
||||||
|
alternative to the `SQLite <http://sqlite.org/>`_ database that Synapse has
|
||||||
|
traditionally used for convenience and simplicity.
|
||||||
|
|
||||||
pip seems to leak *lots* of memory during installation. For instance, a Linux
|
The advantages of Postgres include:
|
||||||
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
|
|
||||||
happens, you will have to individually install the dependencies which are
|
|
||||||
failing, e.g.::
|
|
||||||
|
|
||||||
$ pip install twisted
|
* significant performance improvements due to the superior threading and
|
||||||
|
caching model, smarter query optimiser
|
||||||
|
* allowing the DB to be run on separate hardware
|
||||||
|
* allowing basic active/backup high-availability with a "hot spare" synapse
|
||||||
|
pointing at the same DB master, as well as enabling DB replication in
|
||||||
|
synapse itself.
|
||||||
|
|
||||||
On OSX, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
|
The only disadvantage is that the code is relatively new as of April 2015 and
|
||||||
will need to export CFLAGS=-Qunused-arguments.
|
may have a few regressions relative to SQLite.
|
||||||
|
|
||||||
|
For information on how to install and use PostgreSQL, please see
|
||||||
|
`docs/postgres.rst <docs/postgres.rst>`_.
|
||||||
|
|
||||||
|
Platform Specific Instructions
|
||||||
|
==============================
|
||||||
|
|
||||||
ArchLinux
|
ArchLinux
|
||||||
---------
|
---------
|
||||||
|
|
||||||
Installation on ArchLinux may encounter a few hiccups as Arch defaults to
|
The quickest way to get up and running with ArchLinux is probably with Ivan
|
||||||
python 3, but synapse currently assumes python 2.7 by default.
|
Shapovalov's AUR package from
|
||||||
|
https://aur.archlinux.org/packages/matrix-synapse/, which should pull in all
|
||||||
|
the necessary dependencies.
|
||||||
|
|
||||||
|
Alternatively, to install using pip a few changes may be needed as ArchLinux
|
||||||
|
defaults to python 3, but synapse currently assumes python 2.7 by default:
|
||||||
|
|
||||||
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 )::
|
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 )::
|
||||||
|
|
||||||
$ sudo pip2.7 install --upgrade pip
|
sudo pip2.7 install --upgrade pip
|
||||||
|
|
||||||
You also may need to explicitly specify python 2.7 again during the install
|
You also may need to explicitly specify python 2.7 again during the install
|
||||||
request::
|
request::
|
||||||
|
|
||||||
$ pip2.7 install --process-dependency-links \
|
pip2.7 install https://github.com/matrix-org/synapse/tarball/master
|
||||||
https://github.com/matrix-org/synapse/tarball/master
|
|
||||||
|
|
||||||
If you encounter an error with lib bcrypt causing an Wrong ELF Class:
|
If you encounter an error with lib bcrypt causing an Wrong ELF Class:
|
||||||
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
|
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
|
||||||
compile it under the right architecture. (This should not be needed if
|
compile it under the right architecture. (This should not be needed if
|
||||||
installing under virtualenv)::
|
installing under virtualenv)::
|
||||||
|
|
||||||
$ sudo pip2.7 uninstall py-bcrypt
|
sudo pip2.7 uninstall py-bcrypt
|
||||||
$ sudo pip2.7 install py-bcrypt
|
sudo pip2.7 install py-bcrypt
|
||||||
|
|
||||||
During setup of Synapse you need to call python2.7 directly again::

    cd ~/.synapse
    python2.7 -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --config-path homeserver.yaml \
        --generate-config

...substituting your host and domain name as appropriate.
FreeBSD
-------

Synapse can be installed via FreeBSD Ports or Packages:

- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
- Packages: ``pkg install py27-matrix-synapse``
Windows Install
---------------

Synapse can be installed on Cygwin. It requires the following Cygwin packages:

- gcc
- git
- libffi-devel
- openssl (and openssl-devel, python-openssl)
- python
- python-setuptools

The content repository requires additional packages and will be unable to process
uploads without them:

- libjpeg8
- libjpeg8-devel
- zlib

If you choose to install Synapse without these packages, you will need to reinstall
``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` followed by
``pip install pillow --user``.
@@ -217,21 +308,55 @@ Troubleshooting:

you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
Troubleshooting
===============

Troubleshooting Installation
----------------------------

Synapse requires pip 1.7 or later, so if your OS provides too old a version you
may need to manually upgrade it::

    sudo pip install --upgrade pip

Installing may fail with ``Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)``.
You can fix this by manually upgrading pip and virtualenv::

    sudo pip install --upgrade virtualenv

You can next rerun ``virtualenv -p python2.7 synapse`` to update the virtual env.

Installing may fail during installing virtualenv with ``InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.``
You can fix this by manually installing ndg-httpsclient::

    pip install --upgrade ndg-httpsclient

Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``.
You can fix this by upgrading setuptools::

    pip install --upgrade setuptools

If pip crashes mid-installation for any reason (e.g. lost terminal), pip may
refuse to run until you remove the temporary installation directory it
created. To reset the installation::

    rm -rf /tmp/pip_install_matrix

pip seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.::

    pip install twisted

On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
will need to export CFLAGS=-Qunused-arguments.
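If several of these issues bite at once, it is usually simplest to upgrade the
tooling in one pass and rebuild the virtualenv. A minimal sketch combining the
fixes above, assuming your virtualenv lives in ``~/.synapse``::

    sudo pip install --upgrade pip virtualenv
    virtualenv -p python2.7 ~/.synapse
    source ~/.synapse/bin/activate
    pip install --upgrade setuptools ndg-httpsclient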
Troubleshooting Running
-----------------------

If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
to manually upgrade PyNaCL, as synapse uses NaCl (http://nacl.cr.yp.to/) for
encryption and digital signatures.
Unfortunately PyNACL currently has a few issues
(https://github.com/pyca/pynacl/issues/53) and
@@ -240,44 +365,46 @@ correctly, causing all tests to fail with errors about missing "sodium.h". To
fix try re-installing from PyPI or directly from
(https://github.com/pyca/pynacl)::

    # Install from PyPI
    pip install --user --upgrade --force pynacl

    # Install from github
    pip install --user https://github.com/pyca/pynacl/tarball/master
ArchLinux
~~~~~~~~~

If running `$ synctl start` fails with 'returned non-zero exit status 1',
you will need to explicitly call Python2.7 - either running as::

    python2.7 -m synapse.app.homeserver --daemonize -c homeserver.yaml

...or by editing synctl with the correct python executable.
Synapse Development
===================

To check out a synapse for development, clone the git repo into a working
directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::

    virtualenv env
    source env/bin/activate
    python synapse/python_dependencies.py | xargs -n1 pip install
    pip install setuptools_trial mock

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

Once this is done, you may wish to run Synapse's unit tests, to
check that everything is installed as it should be::

    python setup.py test

This should end with a 'PASSED' result::
@@ -286,17 +413,14 @@ This should end with a 'PASSED' result::

    PASSED (successes=143)
Upgrading an existing Synapse
=============================

The instructions for upgrading synapse are in `UPGRADE.rst`_.
Please check these instructions as upgrading may require extra steps for some
versions of synapse.

.. _UPGRADE.rst: UPGRADE.rst
Setting up Federation
=====================

@@ -318,11 +442,11 @@ IDs:

For the first form, simply pass the required hostname (of the machine) as the
--server-name parameter::

    python -m synapse.app.homeserver \
        --server-name machine.my.domain.name \
        --config-path homeserver.yaml \
        --generate-config
    python -m synapse.app.homeserver --config-path homeserver.yaml

Alternatively, you can run ``synctl start`` to guide you through the process.
@@ -332,38 +456,37 @@ and port where the server is running. (At the current time synapse does not
support clustering multiple servers into a single logical homeserver). The DNS
record would then look something like::

    $ dig -t srv _matrix._tcp.machine.my.domain.name
    _matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.

At this point, you should then run the homeserver with the hostname of this
SRV record, as that is the name other machines will expect it to have::

    python -m synapse.app.homeserver \
        --server-name YOURDOMAIN \
        --config-path homeserver.yaml \
        --generate-config
    python -m synapse.app.homeserver --config-path homeserver.yaml


If you've already generated the config file, you need to edit the "server_name"
in your ``homeserver.yaml`` file. If you've already started Synapse and a
database has been created, you will have to recreate the database.

You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.
Running a Demo Federation of Synapses
-------------------------------------

If you want to get up and running quickly with a trio of homeservers in a
private federation (``localhost:8080``, ``localhost:8081`` and
``localhost:8082``) which you can then access through the webclient running at
http://localhost:8080, simply run::

    demo/start.sh

This is mainly useful just for development purposes.
Running The Demo Web Client
@@ -390,7 +513,10 @@ account. Your name will take the form of::

Specify your desired localpart in the topmost box of the "Register for an
account" form, and click the "Register" button. Hostnames can contain ports if
required due to lack of SRV records (e.g. @matthew:localhost:8448 on an
internal synapse sandbox running on localhost).

If registration fails, you may need to enable it in the homeserver (see
`Synapse Installation`_ above)
Logging In To An Existing Account
@@ -416,14 +542,14 @@ track 3PID logins and publish end-user public keys.

It's currently early days for identity servers as Matrix is not yet using 3PIDs
as the primary means of identity and E2E encryption is not complete. As such,
we are running a single identity server (https://matrix.org) at the current
time.


Where's the spec?!
==================

The source of the matrix spec lives at https://github.com/matrix-org/matrix-doc.
A recent HTML snapshot of this lives at http://matrix.org/docs/spec
@@ -433,10 +559,10 @@ Building Internal API Documentation

Before building internal API documentation install sphinx and
sphinxcontrib-napoleon::

    pip install sphinx
    pip install sphinxcontrib-napoleon

Building internal API documentation::

    python setup.py build_sphinx
UPGRADE.rst (95 changed lines)
@@ -1,3 +1,98 @@
Upgrading Synapse
=================

Before upgrading check if any special steps are required to upgrade from what
you currently have installed to the current version of synapse. The extra
instructions that may be required are listed later in this document.

If synapse was installed in a virtualenv then activate that virtualenv before
upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run:

.. code:: bash

    source ~/.synapse/bin/activate

If synapse was installed using pip then upgrade to the latest version by
running:

.. code:: bash

    pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master

If synapse was installed using git then upgrade to the latest version by
running:

.. code:: bash

    # Pull the latest version of the master branch.
    git pull
    # Update the versions of synapse's python dependencies.
    python synapse/python_dependencies.py | xargs -n1 pip install


Upgrading to v0.11.0
====================

This release includes the option to send anonymous usage stats to matrix.org,
and requires that administrators explicitly opt in or out by setting the
``report_stats`` option to either ``true`` or ``false``.

We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.


Upgrading to v0.9.0
===================

Application services have had a breaking API change in this version.

They can no longer register themselves with a home server using the AS HTTP API. This
decision was made because a compromised application service with free rein to register
any regex in effect grants full read/write access to the home server if a regex of ``.*``
is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
big of a security risk to ignore, and so the ability to register with the HS remotely has
been removed.

It has been replaced by specifying a list of application service registrations in
``homeserver.yaml``::

    app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]

Where ``registration-01.yaml`` looks like::

    url: <String>  # e.g. "https://my.application.service.com"
    as_token: <String>
    hs_token: <String>
    sender_localpart: <String>  # This is a new field which denotes the user_id localpart when using the AS token
    namespaces:
      users:
        - exclusive: <Boolean>
          regex: <String>  # e.g. "@prefix_.*"
      aliases:
        - exclusive: <Boolean>
          regex: <String>
      rooms:
        - exclusive: <Boolean>
          regex: <String>
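For orientation, a hypothetical filled-in registration might look like the
sketch below. Every value is illustrative only: the URL, localpart and regex
are made up, the tokens should be long random secrets in practice, and the
``aliases`` and ``rooms`` namespaces are simply left empty here::

    url: "https://bridge.example.com"       # hypothetical AS endpoint
    as_token: "REPLACE_WITH_RANDOM_SECRET"  # token the AS presents to the HS
    hs_token: "REPLACE_WITH_ANOTHER_SECRET" # token the HS presents to the AS
    sender_localpart: examplebridge
    namespaces:
      users:
        - exclusive: true
          regex: "@example_.*"
      aliases: []
      rooms: []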
Upgrading to v0.8.0
===================

Servers which use captchas will need to add their public key to::

    static/client/register/register_config.js

        window.matrixRegistrationConfig = {
            recaptcha_public_key: "YOUR_PUBLIC_KEY"
        };

This is required in order to support registration fallback (typically used on
mobile devices).


Upgrading to v0.7.0
===================
@@ -1,6 +1,6 @@
#!/usr/bin/env python

# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -21,6 +21,7 @@ import datetime
import argparse

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze


def make_graph(db_name, room_id, file_prefix, limit):
@@ -70,7 +71,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
            float(event.origin_server_ts) / 1000
        ).strftime('%Y-%m-%d %H:%M:%S,%f')

        content = json.dumps(unfreeze(event.get_dict()["content"]))

        label = (
            "<"
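The ``unfreeze`` change above is worth a note: synapse wraps event contents in
immutable frozen dicts, which the JSON encoder may refuse to serialise
directly. A tiny illustrative sketch (the event content here is made up)::

    import simplejson as json
    from synapse.util.frozenutils import freeze, unfreeze

    content = freeze({"msgtype": "m.text", "body": "hello"})  # immutable
    json.dumps(unfreeze(content))  # unfreeze() returns plain dicts/lists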
contrib/graph/graph3.py (new file, 151 lines)
@@ -0,0 +1,151 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import pydot
import cgi
import simplejson as json
import datetime
import argparse

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze


def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"
    with open(file_name) as f:
        lines = f.readlines()

    print "Read lines"

    events = [FrozenEvent(json.loads(line)) for line in lines]

    print "Loaded events."

    events.sort(key=lambda e: e.depth)

    print "Sorted events"

    if limit:
        events = events[-int(limit):]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime('%Y-%m-%d %H:%M:%S,%f')

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print content
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, basestring):
                pass
            else:
                value = json.dumps(value)

            content.append(
                "<b>%s</b>: %s," % (
                    cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
                    cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
                )
            )

        content = "<br/>\n".join(content)

        print content

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": content,
            "time": t,
            "depth": event.depth,
        }

        node = pydot.Node(
            name=event.event_id,
            label=label,
        )

        node_map[event.event_id] = node
        graph.add_node(node)

    print "Created Nodes"

    for event in events:
        for prev_id, _ in event.prev_events:
            try:
                end_node = node_map[prev_id]
            except:
                end_node = pydot.Node(
                    name=prev_id,
                    label="<<b>%s</b>>" % (prev_id,),
                )

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print "Created edges"

    graph.write('%s.dot' % file_prefix, format='raw', prog='dot')

    print "Created Dot"

    graph.write_svg("%s.svg" % file_prefix, prog='dot')

    print "Created svg"

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate a PDU graph for a given room by reading "
                    "from a file with line-delimited events. \n"
                    "Requires pydot."
    )
    parser.add_argument(
        "-p", "--prefix", dest="prefix",
        help="String to prefix output files with",
        default="graph_output"
    )
    parser.add_argument(
        "-l", "--limit",
        help="Only retrieve the last N events.",
    )
    parser.add_argument('event_file')
    parser.add_argument('room')

    args = parser.parse_args()

    make_graph(args.event_file, args.room, args.prefix, args.limit)
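A plausible invocation, given the argparse definition above (the events file is
a hypothetical dump of line-delimited event JSON)::

    python graph3.py -p graph_output -l 100 events.json '!someroom:example.com'

This would render the last 100 events of the room as ``graph_output.dot`` and
``graph_output.svg``.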
contrib/scripts/kick_users.py (new executable file, 93 lines)
@@ -0,0 +1,93 @@
#!/usr/bin/env python
from argparse import ArgumentParser
import json
import requests
import sys
import urllib

def _mkurl(template, kws):
    for key in kws:
        template = template.replace(key, kws[key])
    return template

def main(hs, room_id, access_token, user_id_prefix, why):
    if not why:
        why = "Automated kick."
    print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
    room_state_url = _mkurl(
        "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
        {
            "$HS": hs,
            "$ROOM": room_id,
            "$TOKEN": access_token
        }
    )
    print "Getting room state => %s" % room_state_url
    res = requests.get(room_state_url)
    print "HTTP %s" % res.status_code
    state_events = res.json()
    if "error" in state_events:
        print "FATAL"
        print state_events
        return

    kick_list = []
    room_name = room_id
    for event in state_events:
        if not event["type"] == "m.room.member":
            if event["type"] == "m.room.name":
                room_name = event["content"].get("name")
            continue
        if not event["content"].get("membership") == "join":
            continue
        if event["state_key"].startswith(user_id_prefix):
            kick_list.append(event["state_key"])

    if len(kick_list) == 0:
        print "No user IDs match the prefix '%s'" % user_id_prefix
        return

    print "The following user IDs will be kicked from %s" % room_name
    for uid in kick_list:
        print uid
    doit = raw_input("Continue? [Y]es\n")
    if len(doit) > 0 and doit.lower() == 'y':
        print "Kicking members..."
        # encode them all
        kick_list = [urllib.quote(uid) for uid in kick_list]
        for uid in kick_list:
            kick_url = _mkurl(
                "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
                {
                    "$HS": hs,
                    "$UID": uid,
                    "$ROOM": room_id,
                    "$TOKEN": access_token
                }
            )
            kick_body = {
                "membership": "leave",
                "reason": why
            }
            print "Kicking %s" % uid
            res = requests.put(kick_url, data=json.dumps(kick_body))
            if res.status_code != 200:
                print "ERROR: HTTP %s" % res.status_code
            if res.json().get("error"):
                print "ERROR: JSON %s" % res.json()



if __name__ == "__main__":
    parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
    parser.add_argument("-u","--user-id",help="The user ID prefix e.g. '@irc_'")
    parser.add_argument("-t","--token",help="Your access_token")
    parser.add_argument("-r","--room",help="The room ID to kick members in")
    parser.add_argument("-s","--homeserver",help="The base HS url e.g. http://matrix.org")
    parser.add_argument("-w","--why",help="Reason for the kick. Optional.")
    args = parser.parse_args()
    if not args.room or not args.token or not args.user_id or not args.homeserver:
        parser.print_help()
        sys.exit(1)
    else:
        main(args.homeserver, args.room, args.token, args.user_id, args.why)
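A plausible invocation, given the flags defined above (room ID, token and
prefix are placeholders)::

    ./kick_users.py -s http://matrix.org -r '!someroom:matrix.org' \
        -t YOUR_ACCESS_TOKEN -u '@irc_' -w 'Clearing out stale bridge users'

The script lists the matching members and asks for confirmation before issuing
any kicks.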
contrib/systemd/log_config.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
version: 1

# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
    journal_fmt:
        format: '%(name)s: [%(request)s] %(message)s'

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

handlers:
    journal:
        class: systemd.journal.JournalHandler
        formatter: journal_fmt
        filters: [context]
        SYSLOG_IDENTIFIER: synapse

root:
    level: INFO
    handlers: [journal]

disable_existing_loggers: False
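Note that the ``journal`` handler above relies on
``systemd.journal.JournalHandler``, which comes from the systemd Python
bindings (packaged as ``python-systemd`` on most distributions) rather than
from synapse itself. A quick check that the bindings are importable by the
interpreter that will run synapse::

    python2.7 -c 'from systemd.journal import JournalHandler'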
contrib/systemd/synapse.service (new file, 16 lines)
@@ -0,0 +1,16 @@
# This assumes that Synapse has been installed as a system package
# (e.g. https://aur.archlinux.org/packages/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.

[Unit]
Description=Synapse Matrix homeserver

[Service]
Type=simple
User=synapse
Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml

[Install]
WantedBy=multi-user.target
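Assuming the unit is installed as ``/etc/systemd/system/synapse.service``
(path illustrative), the usual systemd workflow applies::

    sudo systemctl daemon-reload
    sudo systemctl enable synapse.service
    sudo systemctl start synapse.service
    sudo journalctl -u synapse    # logs land in the journal via the config above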
@@ -126,12 +126,26 @@ sub on_unknown_event
    if (!$bridgestate->{$room_id}->{gathered_candidates}) {
        $bridgestate->{$room_id}->{gathered_candidates} = 1;
        my $offer = $bridgestate->{$room_id}->{offer};
        my $candidate_block = {
            audio => '',
            video => '',
        };
        foreach (@{$event->{content}->{candidates}}) {
            if ($_->{sdpMid}) {
                $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
            }
            else {
                $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
                $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
            }
        }
        # XXX: assumes audio comes first
        #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
        #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;

        $offer =~ s/(m=video)/$candidate_block->{audio}$1/;
        $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;

        my $f = send_verto_json_request("verto.invite", {
            "sdp" => $offer,
@@ -172,23 +186,18 @@ sub on_room_message
        warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
    }

Future->needs_all(
    $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
        $bot_matrix->start;
    }),

    $bot_verto->connect(
        %{ $CONFIG{"verto-bot"} },
        on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
        on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
    )->on_done( sub {
        warn("[Verto] connected to websocket");
    }),
)->get;

$loop->attach_signal(
contrib/vertobot/bridge.pl (new executable file, 493 lines)
@@ -0,0 +1,493 @@
#!/usr/bin/env perl

use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::HTTP;
use Net::Async::HTTP::Server;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;
use URI::Encode qw(uri_encode uri_decode);

binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";

my $msisdn_to_matrix = {
    '447417892400' => '@matthew:matrix.org',
};

my $matrix_to_msisdn = {};
foreach (keys %$msisdn_to_matrix) {
    $matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
}


my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
#   https://rt.cpan.org/Ticket/Display.html?id=93107
# ref $loop eq "IO::Async::Loop::Poll" and
#   warn "Using SSL with IO::Poll causes known memory-leaks!!\n";

GetOptions(
    'C|config=s' => \my $CONFIG,
    'eval-from=s' => \my $EVAL_FROM,
) or exit 1;

if( defined $EVAL_FROM ) {
    # An emergency 'eval() this file' hack
    $SIG{HUP} = sub {
        my $code = do {
            open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
            local $/; <$fh>
        };

        eval $code or warn "Cannot eval() - $@";
    };
}

defined $CONFIG or die "Must supply --config\n";

my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };

my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;

my $bridgestate = {};
my $roomid_by_callid = {};

my $sessid = lc new Data::UUID->create_str();
my $as_token = $CONFIG{"matrix-bot"}->{as_token};
my $hs_domain = $CONFIG{"matrix-bot"}->{domain};

my $http = Net::Async::HTTP->new();
$loop->add( $http );

sub create_virtual_user
{
    my ($localpart) = @_;
    my ( $response ) = $http->do_request(
        method => "POST",
        uri => URI->new(
            $CONFIG{"matrix"}->{server}.
                "/_matrix/client/api/v1/register?".
                "access_token=$as_token&user_id=$localpart"
        ),
        content_type => "application/json",
        content => <<EOT
{
    "type": "m.login.application_service",
    "user": "$localpart"
}
EOT
    )->get;
    warn $response->as_string if ($response->code != 200);
}

my $http_server = Net::Async::HTTP::Server->new(
    on_request => sub {
        my $self = shift;
        my ( $req ) = @_;

        my $response;
        my $path = uri_decode($req->path);
        warn("request: $path");
        if ($path =~ m#/users/\@(\+.*)#) {
            # when queried about virtual users, auto-create them in the HS
            my $localpart = $1;
            create_virtual_user($localpart);
            $response = HTTP::Response->new( 200 );
            $response->add_content('{}');
            $response->content_type( "application/json" );
        }
        elsif ($path =~ m#/transactions/(.*)#) {
            my $event = JSON->new->decode($req->body);
            print Dumper($event);

            my $room_id = $event->{room_id};
            my %dp = %{$CONFIG{'verto-dialog-params'}};
            $dp{callID} = $bridgestate->{$room_id}->{callid};

            if ($event->{type} eq 'm.room.membership') {
                my $membership = $event->{content}->{membership};
                my $state_key = $event->{state_key};
                my $room_id = $event->{state_id};

                if ($membership eq 'invite') {
                    # autojoin invites
                    my ( $response ) = $http->do_request(
                        method => "POST",
                        uri => URI->new(
                            $CONFIG{"matrix"}->{server}.
                                "/_matrix/client/api/v1/rooms/$room_id/join?".
                                "access_token=$as_token&user_id=$state_key"
                        ),
                        content_type => "application/json",
                        content => "{}",
                    )->get;
                    warn $response->as_string if ($response->code != 200);
                }
            }
            elsif ($event->{type} eq 'm.call.invite') {
                my $room_id = $event->{room_id};
                $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
                $bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
                $bridgestate->{$room_id}->{sessid} = $sessid;
                # $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
                my $offer = $event->{content}->{offer}->{sdp};
                # $bridgestate->{$room_id}->{gathered_candidates} = 0;
                $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
                # no trickle ICE in verto apparently

                my $f = send_verto_json_request("verto.invite", {
                    "sdp" => $offer,
                    "dialogParams" => \%dp,
                    "sessid" => $bridgestate->{$room_id}->{sessid},
                });
                $self->adopt_future($f);
            }
            # elsif ($event->{type} eq 'm.call.candidates') {
            #     # XXX: this could fire for both matrix->verto and verto->matrix calls
            #     # and races as it collects candidates. much better to just turn off
            #     # candidate gathering in the webclient entirely for now
            #
            #     my $room_id = $event->{room_id};
            #     # XXX: compare call IDs
            #     if (!$bridgestate->{$room_id}->{gathered_candidates}) {
            #         $bridgestate->{$room_id}->{gathered_candidates} = 1;
            #         my $offer = $bridgestate->{$room_id}->{offer};
            #         my $candidate_block = "";
            #         foreach (@{$event->{content}->{candidates}}) {
            #             $candidate_block .= "a=" . $_->{candidate} . "\r\n";
            #         }
            #         # XXX: collate using the right m= line - for now assume audio call
            #         $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
            #
            #         my $f = send_verto_json_request("verto.invite", {
            #             "sdp" => $offer,
            #             "dialogParams" => \%dp,
            #             "sessid" => $bridgestate->{$room_id}->{sessid},
            #         });
            #         $self->adopt_future($f);
            #     }
            #     else {
            #         # ignore them, as no trickle ICE, although we might as well
            #         # batch them up
            #         # foreach (@{$event->{content}->{candidates}}) {
            #         #     push @{$bridgestate->{$room_id}->{candidates}}, $_;
            #         # }
            #     }
            # }
            elsif ($event->{type} eq 'm.call.answer') {
                # grab the answer and relay it to verto as a verto.answer
                my $room_id = $event->{room_id};

                my $answer = $event->{content}->{answer}->{sdp};
                my $f = send_verto_json_request("verto.answer", {
                    "sdp" => $answer,
                    "dialogParams" => \%dp,
                    "sessid" => $bridgestate->{$room_id}->{sessid},
                });
                $self->adopt_future($f);
            }
            elsif ($event->{type} eq 'm.call.hangup') {
                my $room_id = $event->{room_id};
                if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
                    my $f = send_verto_json_request("verto.bye", {
                        "dialogParams" => \%dp,
                        "sessid" => $bridgestate->{$room_id}->{sessid},
                    });
                    $self->adopt_future($f);
                }
                else {
                    warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
                }
            }
            else {
                warn "Unhandled event: $event->{type}";
            }

            $response = HTTP::Response->new( 200 );
            $response->add_content('{}');
            $response->content_type( "application/json" );
        }
        else {
            warn "Unhandled path: $path";
            $response = HTTP::Response->new( 404 );
        }

        $req->respond( $response );
    },
);
$loop->add( $http_server );

$http_server->listen(
    addr => { family => "inet", socktype => "stream", port => 8009 },
    on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
);

my $bot_verto = Net::Async::WebSocket::Client->new(
    on_frame => sub {
        my ( $self, $frame ) = @_;
        warn "[Verto] receiving $frame";
        on_verto_json($frame);
    },
);
$loop->add( $bot_verto );

my $verto_connecting = $loop->new_future;
$bot_verto->connect(
    %{ $CONFIG{"verto-bot"} },
    on_connected => sub {
        warn("[Verto] connected to websocket");
        if (not $verto_connecting->is_done) {
            $verto_connecting->done($bot_verto);

            send_verto_json_request("login", {
                'login' => $CONFIG{'verto-dialog-params'}{'login'},
                'passwd' => $CONFIG{'verto-config'}{'passwd'},
                'sessid' => $sessid,
            });
        }
    },
    on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
    on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);

# die Dumper($verto_connecting);

my $as_url = $CONFIG{"matrix-bot"}->{as_url};

Future->needs_all(
    $http->do_request(
        method => "POST",
        uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
        content_type => "application/json",
        content => <<EOT
{
    "as_token": "$as_token",
    "url": "$as_url",
    "namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
}
EOT
    )->then( sub{
        my ($response) = (@_);
        warn $response->as_string if ($response->code != 200);
        return Future->done;
    }),
    $verto_connecting,
)->get;

$loop->attach_signal(
    PIPE => sub { warn "pipe\n" }
);
$loop->attach_signal(
    INT => sub { $loop->stop },
);
$loop->attach_signal(
    TERM => sub { $loop->stop },
);

eval {
    $loop->run;
} or my $e = $@;

die $e if $e;

exit 0;

{
    my $json_id;
    my $requests;

    sub send_verto_json_request
    {
        $json_id ||= 1;

        my ($method, $params) = @_;
        my $json = {
            jsonrpc => "2.0",
            method => $method,
            params => $params,
            id => $json_id,
        };
        my $text = JSON->new->encode( $json );
        warn "[Verto] sending $text";
        $bot_verto->send_frame ( $text );
        my $request = $loop->new_future;
        $requests->{$json_id} = $request;
        $json_id++;
        return $request;
    }

    sub send_verto_json_response
    {
        my ($result, $id) = @_;
        my $json = {
            jsonrpc => "2.0",
            result => $result,
            id => $id,
        };
        my $text = JSON->new->encode( $json );
        warn "[Verto] sending $text";
        $bot_verto->send_frame ( $text );
    }

    sub on_verto_json
    {
        my $json = JSON->new->decode( $_[0] );
        if ($json->{method}) {
            if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
                $json->{method} eq 'verto.media') {

                my $caller = $json->{dialogParams}->{caller_id_number};
                my $callee = $json->{dialogParams}->{destination_number};
                my $caller_user = '@+' . $caller . ':' . $hs_domain;
                my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
                my $room_id = $roomid_by_callid->{$json->{params}->{callID}};

                if ($json->{params}->{sdp}) {
                    $http->do_request(
                        method => "POST",
                        uri => URI->new(
                            $CONFIG{"matrix"}->{server}.
                                "/_matrix/client/api/v1/send/m.call.answer?".
                                "access_token=$as_token&user_id=$caller_user"
                        ),
                        content_type => "application/json",
                        content => JSON->new->encode({
                            call_id => $bridgestate->{$room_id}->{matrix_callid},
                            version => 0,
                            answer => {
                                sdp => $json->{params}->{sdp},
                                type => "answer",
                            },
                        }),
                    )->then( sub {
                        send_verto_json_response( {
                            method => $json->{method},
                        }, $json->{id});
                    })->get;
                }
            }
            elsif ($json->{method} eq 'verto.invite') {
                my $caller = $json->{dialogParams}->{caller_id_number};
                my $callee = $json->{dialogParams}->{destination_number};
                my $caller_user = '@+' . $caller . ':' . $hs_domain;
                my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";

                my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
                my $room_id;

                # create a virtual user for the caller if needed.
                create_virtual_user($caller);

                # create a room of form #peer-peer and invite the callee
                $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/createRoom?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => JSON->new->encode({
                        room_alias_name => $alias,
                        invite => [ $callee_user ],
                    }),
                )->then( sub {
                    my ( $response ) = @_;
                    my $resp = JSON->new->decode($response->content);
                    $room_id = $resp->{room_id};
                    $roomid_by_callid->{$json->{params}->{callID}} = $room_id;
                })->get;

                # join it
                my ($response) = $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/join/$room_id?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => '{}',
                )->get;

                $bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str();
                $bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
                $bridgestate->{$room_id}->{sessid} = $sessid;

                # put the m.call.invite in there
                $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/send/m.call.invite?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => JSON->new->encode({
                        call_id => $bridgestate->{$room_id}->{matrix_callid},
                        version => 0,
                        answer => {
                            sdp => $json->{params}->{sdp},
                            type => "offer",
                        },
                    }),
                )->then( sub {
                    # acknowledge the verto
                    send_verto_json_response( {
                        method => $json->{method},
                    }, $json->{id});
                })->get;
            }
            elsif ($json->{method} eq 'verto.bye') {
                my $caller = $json->{dialogParams}->{caller_id_number};
                my $callee = $json->{dialogParams}->{destination_number};
                my $caller_user = '@+' . $caller . ':' . $hs_domain;
                my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
                my $room_id = $roomid_by_callid->{$json->{params}->{callID}};

                # put the m.call.hangup into the room
                $http->do_request(
                    method => "POST",
                    uri => URI->new(
                        $CONFIG{"matrix"}->{server}.
                            "/_matrix/client/api/v1/send/m.call.hangup?".
                            "access_token=$as_token&user_id=$caller_user"
                    ),
                    content_type => "application/json",
                    content => JSON->new->encode({
                        call_id => $bridgestate->{$room_id}->{matrix_callid},
                        version => 0,
                    }),
                )->then( sub {
                    # acknowledge the verto
                    send_verto_json_response( {
                        method => $json->{method},
                    }, $json->{id});
                })->get;
            }
            else {
                warn ("[Verto] unhandled method: " . $json->{method});
                send_verto_json_response( {
                    method => $json->{method},
                }, $json->{id});
            }
        }
        elsif ($json->{result}) {
            $requests->{$json->{id}}->done($json->{result});
        }
        elsif ($json->{error}) {
            $requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
        }
    }
}
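For reference, ``send_verto_json_request`` wraps each call in a JSON-RPC 2.0
envelope with a monotonically increasing ``id``, so a ``verto.invite`` goes
over the websocket looking roughly like this (SDP and identifiers abbreviated)::

    {
        "jsonrpc": "2.0",
        "method": "verto.invite",
        "params": {
            "sdp": "v=0 ...",
            "dialogParams": { "callID": "..." },
            "sessid": "..."
        },
        "id": 1
    }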
@@ -7,6 +7,9 @@ matrix:
matrix-bot:
  user_id: '@vertobot:matrix.org'
  password: ''
  domain: 'matrix.org'
  as_url: 'http://localhost:8009'
  as_token: 'vertobot123'

verto-bot:
  host: webrtc.freeswitch.org
@@ -11,7 +11,4 @@ requires 'YAML', 0;
requires 'JSON', 0;
requires 'Getopt::Long', 0;
@@ -11,7 +11,9 @@ if [ -f $PID_FILE ]; then
    exit 1
fi

for port in 8080 8081 8082; do
    rm -rf $DIR/$port
    rm -rf $DIR/media_store.$port
done

rm -rf $DIR/etc
@@ -8,37 +8,49 @@ cd "$DIR/.."
|
|||||||
|
|
||||||
mkdir -p demo/etc
|
mkdir -p demo/etc
|
||||||
|
|
||||||
# Check the --no-rate-limit param
|
export PYTHONPATH=$(readlink -f $(pwd))
|
||||||
PARAMS=""
|
|
||||||
if [ $# -eq 1 ]; then
|
|
||||||
if [ $1 = "--no-rate-limit" ]; then
|
echo $PYTHONPATH
|
||||||
PARAMS="--rc-messages-per-second 1000 --rc-message-burst-count 1000"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
for port in 8080 8081 8082; do
|
for port in 8080 8081 8082; do
|
||||||
echo "Starting server on port $port... "
|
echo "Starting server on port $port... "
|
||||||
|
|
||||||
https_port=$((port + 400))
|
https_port=$((port + 400))
|
||||||
|
mkdir -p demo/$port
|
||||||
|
pushd demo/$port
|
||||||
|
|
||||||
|
#rm $DIR/etc/$port.config
|
||||||
python -m synapse.app.homeserver \
|
python -m synapse.app.homeserver \
|
||||||
--generate-config \
|
--generate-config \
|
||||||
--config-path "demo/etc/$port.config" \
|
|
||||||
-p "$https_port" \
|
|
||||||
--unsecure-port "$port" \
|
|
||||||
-H "localhost:$https_port" \
|
-H "localhost:$https_port" \
|
||||||
-f "$DIR/$port.log" \
|
--config-path "$DIR/etc/$port.config" \
|
||||||
-d "$DIR/$port.db" \
|
--report-stats no
|
||||||
-D --pid-file "$DIR/$port.pid" \
|
|
||||||
--manhole $((port + 1000)) \
|
# Check script parameters
|
||||||
--tls-dh-params-path "demo/demo.tls.dh" \
|
if [ $# -eq 1 ]; then
|
||||||
--media-store-path "demo/media_store.$port" \
|
if [ $1 = "--no-rate-limit" ]; then
|
||||||
$PARAMS $SYNAPSE_PARAMS \
|
# Set high limits in config file to disable rate limiting
|
||||||
|
perl -p -i -e 's/rc_messages_per_second.*/rc_messages_per_second: 1000/g' $DIR/etc/$port.config
|
||||||
|
perl -p -i -e 's/rc_message_burst_count.*/rc_message_burst_count: 1000/g' $DIR/etc/$port.config
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
perl -p -i -e 's/^enable_registration:.*/enable_registration: true/g' $DIR/etc/$port.config
|
||||||
|
|
||||||
|
if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
|
||||||
|
echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
|
||||||
|
fi
|
||||||
|
if ! grep -F "report_stats" -q $DIR/etc/$port.config ; then
|
||||||
|
echo "report_stats: false" >> $DIR/etc/$port.config
|
||||||
|
fi
|
||||||
|
|
||||||
python -m synapse.app.homeserver \
|
python -m synapse.app.homeserver \
|
||||||
--config-path "demo/etc/$port.config" \
|
--config-path "$DIR/etc/$port.config" \
|
||||||
|
-D \
|
||||||
-vv \
|
-vv \
|
||||||
|
|
||||||
|
popd
|
||||||
done
|
done
|
||||||
|
|
||||||
cd "$CWD"
|
cd "$CWD"
|
||||||
|
docs/CAPTCHA_SETUP (new file, 31 lines)
@@ -0,0 +1,31 @@

Captcha can be enabled for this home server. This file explains how to do that.
The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.

Getting keys
------------
Requires a public/private key pair from:

https://developers.google.com/recaptcha/


Setting ReCaptcha Keys
----------------------
The keys are a config option on the home server config. If they are not
visible, you can generate them via --generate-config. Set the following value:

  recaptcha_public_key: YOUR_PUBLIC_KEY
  recaptcha_private_key: YOUR_PRIVATE_KEY

In addition, you MUST enable captchas via:

  enable_registration_captcha: true

Configuring IP used for auth
----------------------------
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the X-Forwarded-For (XFF) header instead of the origin
IP address. This can be configured as an option on the home server like so:

  captcha_ip_origin_is_x_forwarded: true
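For reference, all four options above live together in the homeserver config. A minimal sketch of appending them to an existing config file (the path is illustrative and the key values are placeholders, not real API keys):

  # Sketch: append the captcha options described above to an existing
  # homeserver.yaml; substitute your real ReCaptcha keys for the placeholders.
  cat >> homeserver.yaml <<'EOF'
  recaptcha_public_key: "YOUR_PUBLIC_KEY"
  recaptcha_private_key: "YOUR_PRIVATE_KEY"
  enable_registration_captcha: true
  captcha_ip_origin_is_x_forwarded: true
  EOF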
docs/application_services.rst (new file, 36 lines)
@@ -0,0 +1,36 @@

Registering an Application Service
==================================

The registration of new application services depends on the homeserver used.
In synapse, you need to create a new configuration file for your AS and add it
to the list specified under the ``app_service_config_files`` config
option in your synapse config.

For example:

.. code-block:: yaml

  app_service_config_files:
  - /home/matrix/.synapse/<your-AS>.yaml


The format of the AS configuration file is as follows:

.. code-block:: yaml

  url: <base url of AS>
  as_token: <token AS will add to requests to HS>
  hs_token: <token HS will add to requests to AS>
  sender_localpart: <localpart of AS user>
  namespaces:
    users:  # List of users we're interested in
      - exclusive: <bool>
        regex: <regex>
      - ...
    aliases: []  # List of aliases we're interested in
    rooms: []  # List of room ids we're interested in

See the spec_ for further details on how application services work.

.. _spec: https://github.com/matrix-org/matrix-doc/blob/master/specification/25_application_service_api.rst#application-service-api
docs/metrics-howto.rst (new file, 50 lines)
@@ -0,0 +1,50 @@

How to monitor Synapse metrics using Prometheus
===============================================

1: Install prometheus:
  Follow instructions at http://prometheus.io/docs/introduction/install/

2: Enable synapse metrics:
  Simply setting a (local) port number will enable it. Pick a port.
  prometheus itself defaults to 9090, so starting just above that for
  locally monitored services seems reasonable. E.g. 9092:

  Add to homeserver.yaml

    metrics_port: 9092

  Restart synapse

3: Check out synapse-prometheus-config
  https://github.com/matrix-org/synapse-prometheus-config

4: Add ``synapse.html`` and ``synapse.rules``
  The ``.html`` file needs to appear in prometheus's ``consoles`` directory,
  and the ``.rules`` file needs to be invoked somewhere in the main config
  file. A symlink to each from the git checkout into the prometheus directory
  might be easiest to ensure ``git pull`` keeps it updated.

5: Add a prometheus target for synapse
  This is easiest if prometheus runs on the same machine as synapse, as it can
  then just use localhost::

    global: {
      rule_file: "synapse.rules"
    }

    job: {
      name: "synapse"

      target_group: {
        target: "http://localhost:9092/"
      }
    }

6: Start prometheus::

    ./prometheus -config.file=prometheus.conf

7: Wait a few seconds for it to start and perform the first scrape,
   then visit the console:

    http://server-where-prometheus-runs:9090/consoles/synapse.html
docs/postgres.rst (new file, 120 lines)
@@ -0,0 +1,120 @@

Using Postgres
--------------

Set up database
===============

The PostgreSQL database used *must* have the correct encoding set, otherwise it
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::

    CREATE DATABASE synapse
        ENCODING 'UTF8'
        LC_COLLATE='C'
        LC_CTYPE='C'
        template=template0
        OWNER synapse_user;

This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).

Set up client in Debian/Ubuntu
==============================

Postgres support depends on the postgres python connector ``psycopg2``. In the
virtual env::

    sudo apt-get install libpq-dev
    pip install psycopg2

Set up client in RHEL/CentOS 7
==============================

Make sure you have the appropriate version of postgres-devel installed. For a
postgres 9.4, use the postgres 9.4 packages from
`here <https://wiki.postgresql.org/wiki/YUM_Installation>`_.

As with Debian/Ubuntu, postgres support depends on the postgres python connector
``psycopg2``. In the virtual env::

    sudo yum install postgresql-devel libpqxx-devel.x86_64
    export PATH=/usr/pgsql-9.4/bin/:$PATH
    pip install psycopg2

Synapse config
==============

When you are ready to start using PostgreSQL, add the following line to your
config file::

    database:
        name: psycopg2
        args:
            user: <user>
            password: <pass>
            database: <db>
            host: <host>
            cp_min: 5
            cp_max: 10

All keys and values in ``args`` are passed to the ``psycopg2.connect(..)``
function, except keys beginning with ``cp_``, which are consumed by the twisted
adbapi connection pool.


Porting from SQLite
===================

Overview
~~~~~~~~

The script ``synapse_port_db`` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done as a two phase process:

1. Copy the existing SQLite database to a separate location (while the server
   is down) and running the port script against that offline database.
2. Shut down the server. Rerun the port script to port any data that has come
   in since taking the first snapshot. Restart server against the PostgreSQL
   database.

The port script is designed to be run repeatedly against newer snapshots of the
SQLite database file. This makes it safe to repeat step 1 if there was a delay
between taking the previous snapshot and being ready to do step 2.

It is safe to kill the port script at any time and restart it.

Using the port script
~~~~~~~~~~~~~~~~~~~~~

Firstly, shut down the currently running synapse server and copy its database
file (typically ``homeserver.db``) to another location. Once the copy is
complete, restart synapse. For instance::

    ./synctl stop
    cp homeserver.db homeserver.db.snapshot
    ./synctl start

Assuming your new config file (as described in the section *Synapse config*)
is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
``homeserver.db.snapshot`` then simply run::

    synapse_port_db --sqlite-database homeserver.db.snapshot \
        --postgres-config homeserver-postgres.yaml

The flag ``--curses`` displays a coloured curses progress UI.

If the script took a long time to complete, or time has otherwise passed since
the original snapshot was taken, repeat the previous steps with a newer
snapshot.

To complete the conversion shut down the synapse server and run the port
script one last time, e.g. if the SQLite database is at ``homeserver.db``
run::

    synapse_port_db --sqlite-database homeserver.db \
        --postgres-config database_config.yaml

Once that has completed, change the synapse config to point at the PostgreSQL
database configuration file using the ``database_config`` parameter (see
`Synapse Config`_) and restart synapse. Synapse should now be running against
PostgreSQL.
@@ -81,7 +81,7 @@ Your home server configuration file needs the following extra keys:
 As an example, here is the relevant section of the config file for
 matrix.org::
 
-    turn_uris: turn:turn.matrix.org:3478?transport=udp,turn:turn.matrix.org:3478?transport=tcp
+    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
     turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
     turn_user_lifetime: 86400000
 
jenkins.sh (new executable file, 81 lines)
@@ -0,0 +1,81 @@

#!/bin/bash -eu

export PYTHONDONTWRITEBYTECODE=yep

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox

: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}

TOX_BIN=$WORKSPACE/.tox/py27/bin

if [[ ! -e .sytest-base ]]; then
  git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
  (cd .sytest-base; git fetch -p)
fi

rm -rf sytest
git clone .sytest-base sytest --shared
cd sytest

git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)

: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5}
: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5}
: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5}
export PERL5LIB PERL_MB_OPT PERL_MM_OPT

./install-deps.pl

: ${PORT_BASE:=8000}

echo >&2 "Running sytest with SQLite3";
./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
    --python $TOX_BIN/python --all --port-base $PORT_BASE > results-sqlite3.tap

RUN_POSTGRES=""

for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do
    if psql synapse_jenkins_$port <<< ""; then
        RUN_POSTGRES="$RUN_POSTGRES:$port"
        cat > localhost-$port/database.yaml << EOF
name: psycopg2
args:
    database: synapse_jenkins_$port
EOF
    fi
done

# Run if both postgresql databases exist
if test "$RUN_POSTGRES" = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
    echo >&2 "Running sytest with PostgreSQL";
    $TOX_BIN/pip install psycopg2
    ./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
        --python $TOX_BIN/python --all --port-base $PORT_BASE > results-postgresql.tap
else
    echo >&2 "Skipping running sytest with PostgreSQL, $RUN_POSTGRES"
fi

cd ..
cp sytest/.coverage.* .

# Combine the coverage reports
echo "Combining:" .coverage.*
$TOX_BIN/python -m coverage combine
# Output coverage to coverage.xml
$TOX_BIN/coverage xml -o coverage.xml
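Note that jenkins.sh assumes the Jenkins environment, in particular that $WORKSPACE points at the checkout directory. A sketch of a local dry run under that assumption (the variable values are illustrative):

  # Sketch: supply the variables Jenkins would normally provide;
  # WORKSPACE is the synapse checkout, GIT_BRANCH selects the sytest ref.
  WORKSPACE=$(pwd) GIT_BRANCH=origin/develop ./jenkins.sh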
@@ -56,10 +56,9 @@ if __name__ == '__main__':
 
     js = json.load(args.json)
 
-
     auth = Auth(Mock())
     check_auth(
         auth,
         [FrozenEvent(d) for d in js["auth_chain"]],
-        [FrozenEvent(d) for d in js["pdus"]],
+        [FrozenEvent(d) for d in js.get("pdus", [])],
     )
@@ -1,5 +1,5 @@
 from synapse.crypto.event_signing import *
-from syutil.base64util import encode_base64
+from unpaddedbase64 import encode_base64
 
 import argparse
 import hashlib
@@ -1,9 +1,7 @@
 
-from syutil.crypto.jsonsign import verify_signed_json
-from syutil.crypto.signing_key import (
-    decode_verify_key_bytes, write_signing_keys
-)
-from syutil.base64util import decode_base64
+from signedjson.sign import verify_signed_json
+from signedjson.key import decode_verify_key_bytes, write_signing_keys
+from unpaddedbase64 import decode_base64
 
 import urllib2
 import json
scripts-dev/convert_server_keys.py (new file, 116 lines)
@@ -0,0 +1,116 @@

import psycopg2
import yaml
import sys
import json
import time
import hashlib
from unpaddedbase64 import encode_base64
from signedjson.key import read_signing_keys
from signedjson.sign import sign_json
from canonicaljson import encode_canonical_json


def select_v1_keys(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, verify_key in rows:
        results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
    return results


def select_v1_certs(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, tls_certificate in rows:
        results[server_name] = tls_certificate
    return results


def select_v2_json(connection):
    cursor = connection.cursor()
    cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
    rows = cursor.fetchall()
    cursor.close()
    results = {}
    for server_name, key_id, key_json in rows:
        results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
    return results


def convert_v1_to_v2(server_name, valid_until, keys, certificate):
    return {
        "old_verify_keys": {},
        "server_name": server_name,
        "verify_keys": {
            key_id: {"key": key}
            for key_id, key in keys.items()
        },
        "valid_until_ts": valid_until,
        "tls_fingerprints": [fingerprint(certificate)],
    }


def fingerprint(certificate):
    finger = hashlib.sha256(certificate)
    return {"sha256": encode_base64(finger.digest())}


def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)
    for key_id in json["verify_keys"]:
        yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))


def main():
    config = yaml.load(open(sys.argv[1]))
    valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24

    server_name = config["server_name"]
    signing_key = read_signing_keys(open(config["signing_key_path"]))[0]

    database = config["database"]
    assert database["name"] == "psycopg2", "Can only convert for postgresql"
    args = database["args"]
    args.pop("cp_max")
    args.pop("cp_min")
    connection = psycopg2.connect(**args)
    keys = select_v1_keys(connection)
    certificates = select_v1_certs(connection)
    json = select_v2_json(connection)

    result = {}
    for server in keys:
        if not server in json:
            v2_json = convert_v1_to_v2(
                server, valid_until, keys[server], certificates[server]
            )
            v2_json = sign_json(v2_json, server_name, signing_key)
            result[server] = v2_json

    yaml.safe_dump(result, sys.stdout, default_flow_style=False)

    rows = list(
        row for server, json in result.items()
        for row in rows_v2(server, json)
    )

    cursor = connection.cursor()
    cursor.executemany(
        "INSERT INTO server_keys_json ("
        " server_name, key_id, from_server,"
        " ts_added_ms, ts_valid_until_ms, key_json"
        ") VALUES (%s, %s, %s, %s, %s, %s)",
        rows
    )
    connection.commit()


if __name__ == '__main__':
    main()
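A minimal invocation sketch, assuming the homeserver config (with server_name, signing_key_path, and a psycopg2 database section) lives at homeserver.yaml:

  # The script reads the config path from argv[1] (see main() above) and
  # prints the converted, re-signed v2 key JSON to stdout as YAML.
  ./scripts-dev/convert_server_keys.py homeserver.yaml > v2_keys.yaml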
@@ -1,5 +1,5 @@
 #!/usr/bin/perl -pi
-# Copyright 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 $copyright = <<EOT;
-/* Copyright 2015 OpenMarket Ltd
+/* Copyright 2016 OpenMarket Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 #!/usr/bin/perl -pi
-# Copyright 2014 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 $copyright = <<EOT;
-# Copyright 2015 OpenMarket Ltd
+# Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
scripts-dev/definitions.py (new executable file, 175 lines)
@@ -0,0 +1,175 @@

#! /usr/bin/python

import ast
import yaml

class DefinitionVisitor(ast.NodeVisitor):
    def __init__(self):
        super(DefinitionVisitor, self).__init__()
        self.functions = {}
        self.classes = {}
        self.names = {}
        self.attrs = set()
        self.definitions = {
            'def': self.functions,
            'class': self.classes,
            'names': self.names,
            'attrs': self.attrs,
        }

    def visit_Name(self, node):
        self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)

    def visit_Attribute(self, node):
        self.attrs.add(node.attr)
        for child in ast.iter_child_nodes(node):
            self.visit(child)

    def visit_ClassDef(self, node):
        visitor = DefinitionVisitor()
        self.classes[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)

    def visit_FunctionDef(self, node):
        visitor = DefinitionVisitor()
        self.functions[node.name] = visitor.definitions
        for child in ast.iter_child_nodes(node):
            visitor.visit(child)


def non_empty(defs):
    functions = {name: non_empty(f) for name, f in defs['def'].items()}
    classes = {name: non_empty(f) for name, f in defs['class'].items()}
    result = {}
    if functions: result['def'] = functions
    if classes: result['class'] = classes
    names = defs['names']
    uses = []
    for name in names.get('Load', ()):
        if name not in names.get('Param', ()) and name not in names.get('Store', ()):
            uses.append(name)
    uses.extend(defs['attrs'])
    if uses: result['uses'] = uses
    result['names'] = names
    result['attrs'] = defs['attrs']
    return result


def definitions_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = DefinitionVisitor()
    visitor.visit(input_ast)
    definitions = non_empty(visitor.definitions)
    return definitions


def definitions_in_file(filepath):
    with open(filepath) as f:
        return definitions_in_code(f.read())


def defined_names(prefix, defs, names):
    for name, funcs in defs.get('def', {}).items():
        names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)

    for name, funcs in defs.get('class', {}).items():
        names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
        defined_names(prefix + name + ".", funcs, names)


def used_names(prefix, item, defs, names):
    for name, funcs in defs.get('def', {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    for name, funcs in defs.get('class', {}).items():
        used_names(prefix + name + ".", name, funcs, names)

    for used in defs.get('uses', ()):
        if used in names:
            names[used].setdefault('used', {}).setdefault(item, []).append(prefix.rstrip('.'))


if __name__ == '__main__':
    import sys, os, argparse, re

    parser = argparse.ArgumentParser(description='Find definitions.')
    parser.add_argument(
        "--unused", action="store_true", help="Only list unused definitions"
    )
    parser.add_argument(
        "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
    )
    parser.add_argument(
        "--pattern", action="append", metavar="REGEXP",
        help="Search for a pattern"
    )
    parser.add_argument(
        "directories", nargs='+', metavar="DIR",
        help="Directories to search for definitions"
    )
    parser.add_argument(
        "--referrers", default=0, type=int,
        help="Include referrers up to the given depth"
    )
    parser.add_argument(
        "--format", default="yaml",
        help="Output format, one of 'yaml' or 'dot'"
    )
    args = parser.parse_args()

    definitions = {}
    for directory in args.directories:
        for root, dirs, files in os.walk(directory):
            for filename in files:
                if filename.endswith(".py"):
                    filepath = os.path.join(root, filename)
                    definitions[filepath] = definitions_in_file(filepath)

    names = {}
    for filepath, defs in definitions.items():
        defined_names(filepath + ":", defs, names)

    for filepath, defs in definitions.items():
        used_names(filepath + ":", None, defs, names)

    patterns = [re.compile(pattern) for pattern in args.pattern or ()]
    ignore = [re.compile(pattern) for pattern in args.ignore or ()]

    result = {}
    for name, definition in names.items():
        if patterns and not any(pattern.match(name) for pattern in patterns):
            continue
        if ignore and any(pattern.match(name) for pattern in ignore):
            continue
        if args.unused and definition.get('used'):
            continue
        result[name] = definition

    referrer_depth = args.referrers
    referrers = set()
    while referrer_depth:
        referrer_depth -= 1
        for entry in result.values():
            for used_by in entry.get("used", ()):
                referrers.add(used_by)
        for name, definition in names.items():
            if not name in referrers:
                continue
            if ignore and any(pattern.match(name) for pattern in ignore):
                continue
            result[name] = definition

    if args.format == 'yaml':
        yaml.dump(result, sys.stdout, default_flow_style=False)
    elif args.format == 'dot':
        print "digraph {"
        for name, entry in result.items():
            print name
            for used_by in entry.get("used", ()):
                if used_by in result:
                    print used_by, "->", name
        print "}"
    else:
        raise ValueError("Unknown format %r" % (args.format))
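An illustrative invocation sketch, using only the flags defined by the argparse block above (paths are examples):

  # Report definitions under synapse/ that nothing appears to use,
  # including one level of referrers, and save the YAML output.
  ./scripts-dev/definitions.py --unused --referrers 1 synapse/ > unused.yaml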
scripts-dev/dump_macaroon.py (new executable file, 24 lines)
@@ -0,0 +1,24 @@

#!/usr/bin/env python2

import pymacaroons
import sys

if len(sys.argv) == 1:
    sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
    sys.exit(1)

macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None

macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print macaroon.inspect()

print ""

verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
    verifier.verify(macaroon, key)
    print "Signature is correct"
except Exception as e:
    print e.message
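A usage sketch, matching the argv handling above (both variables here are placeholders for a real serialized macaroon and the server's verification key):

  # Inspect a macaroon's caveats; pass the key as the optional second
  # argument to also check its signature.
  ./scripts-dev/dump_macaroon.py "$ACCESS_TOKEN" "$MACAROON_SECRET_KEY"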
@@ -6,8 +6,8 @@ from synapse.crypto.event_signing import (
     add_event_pdu_content_hash, compute_pdu_event_reference_hash
 )
 from synapse.api.events.utils import prune_pdu
-from syutil.base64util import encode_base64, decode_base64
-from syutil.jsonutil import encode_canonical_json
+from unpaddedbase64 import encode_base64, decode_base64
+from canonicaljson import encode_canonical_json
 import sqlite3
 import sys
 
scripts-dev/list_url_patterns.py (new executable file, 62 lines)
@@ -0,0 +1,62 @@

#! /usr/bin/python

import ast
import argparse
import os
import sys
import yaml

PATTERNS_V1 = []
PATTERNS_V2 = []

RESULT = {
    "v1": PATTERNS_V1,
    "v2": PATTERNS_V2,
}

class CallVisitor(ast.NodeVisitor):
    def visit_Call(self, node):
        if isinstance(node.func, ast.Name):
            name = node.func.id
        else:
            return

        if name == "client_path_patterns":
            PATTERNS_V1.append(node.args[0].s)
        elif name == "client_v2_patterns":
            PATTERNS_V2.append(node.args[0].s)


def find_patterns_in_code(input_code):
    input_ast = ast.parse(input_code)
    visitor = CallVisitor()
    visitor.visit(input_ast)


def find_patterns_in_file(filepath):
    with open(filepath) as f:
        find_patterns_in_code(f.read())


parser = argparse.ArgumentParser(description='Find url patterns.')

parser.add_argument(
    "directories", nargs='+', metavar="DIR",
    help="Directories to search for definitions"
)

args = parser.parse_args()


for directory in args.directories:
    for root, dirs, files in os.walk(directory):
        for filename in files:
            if filename.endswith(".py"):
                filepath = os.path.join(root, filename)
                find_patterns_in_file(filepath)

PATTERNS_V1.sort()
PATTERNS_V2.sort()

yaml.dump(RESULT, sys.stdout, default_flow_style=False)
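An illustrative run (the directory is an example; any tree of .py files works):

  # Walk synapse/rest for .py files and dump the v1/v2 client URL patterns
  # found; sources are parsed with ast, never imported or executed.
  ./scripts-dev/list_url_patterns.py synapse/rest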
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# This is will prepare a synapse database for running with v0.0.1 of synapse.
-# It will store all the user information, but will *delete* all messages and
-# room data.
-
-set -e
-
-cp "$1" "$1.bak"
-
-DUMP=$(sqlite3 "$1" << 'EOF'
-.dump users
-.dump access_tokens
-.dump presence
-.dump profiles
-EOF
-)
-
-rm "$1"
-
-sqlite3 "$1" <<< "$DUMP"
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# This is will prepare a synapse database for running with v0.5.0 of synapse.
-# It will store all the user information, but will *delete* all messages and
-# room data.
-
-set -e
-
-cp "$1" "$1.bak"
-
-DUMP=$(sqlite3 "$1" << 'EOF'
-.dump users
-.dump access_tokens
-.dump presence
-.dump profiles
-EOF
-)
-
-rm "$1"
-
-sqlite3 "$1" <<< "$DUMP"
scripts/gen_password (new file, 1 line)
@@ -0,0 +1 @@

perl -MCrypt::Random -MCrypt::Eksblowfish::Bcrypt -e 'print Crypt::Eksblowfish::Bcrypt::bcrypt("secret", "\$2\$12\$" . Crypt::Eksblowfish::Bcrypt::en_base64(Crypt::Random::makerandom_octet(Length=>16)))."\n"'
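The one-liner bcrypt-hashes the literal string "secret" with a random 16-byte salt. To hash a different password, substitute it in; a sketch, assuming the Crypt::Eksblowfish and Crypt::Random Perl modules are installed ("hunter2" is a placeholder):

  # Same modules and salt construction as the script above, with the
  # hard-coded "secret" replaced by a password of your choice.
  perl -MCrypt::Random -MCrypt::Eksblowfish::Bcrypt -e \
    'print Crypt::Eksblowfish::Bcrypt::bcrypt("hunter2", "\$2\$12\$" . Crypt::Eksblowfish::Bcrypt::en_base64(Crypt::Random::makerandom_octet(Length=>16)))."\n"'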
scripts/register_new_matrix_user (new executable file, 154 lines)
@@ -0,0 +1,154 @@

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import argparse
import getpass
import hashlib
import hmac
import json
import sys
import urllib2
import yaml


def request_registration(user, password, server_location, shared_secret):
    mac = hmac.new(
        key=shared_secret,
        msg=user,
        digestmod=hashlib.sha1,
    ).hexdigest()

    data = {
        "user": user,
        "password": password,
        "mac": mac,
        "type": "org.matrix.login.shared_secret",
    }

    server_location = server_location.rstrip("/")

    print "Sending registration request..."

    req = urllib2.Request(
        "%s/_matrix/client/api/v1/register" % (server_location,),
        data=json.dumps(data),
        headers={'Content-Type': 'application/json'}
    )
    try:
        if sys.version_info[:3] >= (2, 7, 9):
            # As of version 2.7.9, urllib2 now checks SSL certs
            import ssl
            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
        else:
            f = urllib2.urlopen(req)
        f.read()
        f.close()
        print "Success."
    except urllib2.HTTPError as e:
        print "ERROR! Received %d %s" % (e.code, e.reason,)
        if 400 <= e.code < 500:
            if e.info().type == "application/json":
                resp = json.load(e)
                if "error" in resp:
                    print resp["error"]
        sys.exit(1)


def register_new_user(user, password, server_location, shared_secret):
    if not user:
        try:
            default_user = getpass.getuser()
        except:
            default_user = None

        if default_user:
            user = raw_input("New user localpart [%s]: " % (default_user,))
            if not user:
                user = default_user
        else:
            user = raw_input("New user localpart: ")

    if not user:
        print "Invalid user name"
        sys.exit(1)

    if not password:
        password = getpass.getpass("Password: ")

        if not password:
            print "Password cannot be blank."
            sys.exit(1)

        confirm_password = getpass.getpass("Confirm password: ")

        if password != confirm_password:
            print "Passwords do not match"
            sys.exit(1)

    request_registration(user, password, server_location, shared_secret)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Used to register new users with a given home server when"
                    " registration has been disabled. The home server must be"
                    " configured with the 'registration_shared_secret' option"
                    " set.",
    )
    parser.add_argument(
        "-u", "--user",
        default=None,
        help="Local part of the new user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-p", "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-c", "--config",
        type=argparse.FileType('r'),
        help="Path to server config file. Used to read in shared secret.",
    )

    group.add_argument(
        "-k", "--shared-secret",
        help="Shared secret as defined in server config file.",
    )

    parser.add_argument(
        "server_url",
        default="https://localhost:8448",
        nargs='?',
        help="URL to use to talk to the home server. Defaults to "
             " 'https://localhost:8448'.",
    )

    args = parser.parse_args()

    if "config" in args and args.config:
        config = yaml.safe_load(args.config)
        secret = config.get("registration_shared_secret", None)
        if not secret:
            print "No 'registration_shared_secret' defined in config."
            sys.exit(1)
    else:
        secret = args.shared_secret

    register_new_user(args.user, args.password, args.server_url, secret)
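A typical invocation sketch (the config path and URL are illustrative; the flags are the ones defined by the argparse block above):

  # Read registration_shared_secret from the server config and register a
  # new user against a local homeserver; prompts for localpart and password.
  ./scripts/register_new_matrix_user -c homeserver.yaml https://localhost:8448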
scripts/synapse_port_db (new executable file, 794 lines)
@@ -0,0 +1,794 @@

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer, reactor
from twisted.enterprise import adbapi

from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.storage.engines import create_engine

import argparse
import curses
import logging
import sys
import time
import traceback
import yaml


logger = logging.getLogger("synapse_port_db")


BOOLEAN_COLUMNS = {
    "events": ["processed", "outlier"],
    "rooms": ["is_public"],
    "event_edges": ["is_state"],
    "presence_list": ["accepted"],
}


APPEND_ONLY_TABLES = [
    "event_content_hashes",
    "event_reference_hashes",
    "event_signatures",
    "event_edge_hashes",
    "events",
    "event_json",
    "state_events",
    "room_memberships",
    "feedback",
    "topics",
    "room_names",
    "rooms",
    "local_media_repository",
    "local_media_repository_thumbnails",
    "remote_media_cache",
    "remote_media_cache_thumbnails",
    "redactions",
    "event_edges",
    "event_auth",
    "received_transactions",
    "sent_transactions",
    "transaction_id_to_pdu",
    "users",
    "state_groups",
    "state_groups_state",
    "event_to_state_groups",
    "rejections",
    "event_search",
]


end_error_exec_info = None


class Store(object):
    """This object is used to pull out some of the convenience API from the
    Storage layer.

    *All* database interactions should go through this object.
    """
    def __init__(self, db_pool, engine):
        self.db_pool = db_pool
        self.database_engine = engine

    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
    _simple_insert = SQLBaseStore.__dict__["_simple_insert"]

    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
    _simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
    _simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]

    _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
    _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]

    def runInteraction(self, desc, func, *args, **kwargs):
        def r(conn):
            try:
                i = 0
                N = 5
                while True:
                    try:
                        txn = conn.cursor()
                        return func(
                            LoggingTransaction(txn, desc, self.database_engine, []),
                            *args, **kwargs
                        )
                    except self.database_engine.module.DatabaseError as e:
                        if self.database_engine.is_deadlock(e):
                            logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
                            if i < N:
                                i += 1
                                conn.rollback()
                                continue
                        raise
            except Exception as e:
                logger.debug("[TXN FAIL] {%s} %s", desc, e)
                raise

        return self.db_pool.runWithConnection(r)

    def execute(self, f, *args, **kwargs):
        return self.runInteraction(f.__name__, f, *args, **kwargs)

    def execute_sql(self, sql, *args):
        def r(txn):
            txn.execute(sql, args)
            return txn.fetchall()
        return self.runInteraction("execute_sql", r)

    def insert_many_txn(self, txn, table, headers, rows):
        sql = "INSERT INTO %s (%s) VALUES (%s)" % (
            table,
            ", ".join(k for k in headers),
            ", ".join("%s" for _ in headers)
        )

        try:
            txn.executemany(sql, rows)
        except:
            logger.exception(
                "Failed to insert: %s",
                table,
            )
            raise


class Porter(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @defer.inlineCallbacks
    def setup_table(self, table):
        if table in APPEND_ONLY_TABLES:
            # It's safe to just carry on inserting.
            next_chunk = yield self.postgres_store._simple_select_one_onecol(
                table="port_from_sqlite3",
                keyvalues={"table_name": table},
                retcol="rowid",
                allow_none=True,
            )

            total_to_port = None
            if next_chunk is None:
                if table == "sent_transactions":
                    next_chunk, already_ported, total_to_port = (
                        yield self._setup_sent_transactions()
                    )
                else:
                    yield self.postgres_store._simple_insert(
                        table="port_from_sqlite3",
                        values={"table_name": table, "rowid": 1}
                    )

                    next_chunk = 1
                    already_ported = 0

            if total_to_port is None:
                already_ported, total_to_port = yield self._get_total_count_to_port(
                    table, next_chunk
                )
        else:
            def delete_all(txn):
                txn.execute(
                    "DELETE FROM port_from_sqlite3 WHERE table_name = %s",
                    (table,)
                )
                txn.execute("TRUNCATE %s CASCADE" % (table,))

            yield self.postgres_store.execute(delete_all)

            yield self.postgres_store._simple_insert(
                table="port_from_sqlite3",
                values={"table_name": table, "rowid": 0}
            )

            next_chunk = 1

            already_ported, total_to_port = yield self._get_total_count_to_port(
                table, next_chunk
            )

        defer.returnValue((table, already_ported, total_to_port, next_chunk))

    @defer.inlineCallbacks
    def handle_table(self, table, postgres_size, table_size, next_chunk):
        if not table_size:
            return

        self.progress.add_table(table, postgres_size, table_size)

        select = (
            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
            % (table,)
        )

        while True:
            def r(txn):
                txn.execute(select, (next_chunk, self.batch_size,))
                rows = txn.fetchall()
                headers = [column[0] for column in txn.description]

                return headers, rows

            headers, rows = yield self.sqlite_store.runInteraction("select", r)

            if rows:
                next_chunk = rows[-1][0] + 1

                if table == "event_search":
                    # We have to treat event_search differently since it has a
                    # different structure in the two different databases.
                    def insert(txn):
                        sql = (
                            "INSERT INTO event_search (event_id, room_id, key, sender, vector)"
                            " VALUES (?,?,?,?,to_tsvector('english', ?))"
                        )

                        rows_dict = [
                            dict(zip(headers, row))
                            for row in rows
                        ]

                        txn.executemany(sql, [
                            (
                                row["event_id"],
                                row["room_id"],
                                row["key"],
                                row["sender"],
                                row["value"],
                            )
                            for row in rows_dict
                        ])

                        self.postgres_store._simple_update_one_txn(
                            txn,
                            table="port_from_sqlite3",
                            keyvalues={"table_name": table},
                            updatevalues={"rowid": next_chunk},
                        )
                else:
                    self._convert_rows(table, headers, rows)

                    def insert(txn):
                        self.postgres_store.insert_many_txn(
                            txn, table, headers[1:], rows
                        )

                        self.postgres_store._simple_update_one_txn(
                            txn,
                            table="port_from_sqlite3",
                            keyvalues={"table_name": table},
                            updatevalues={"rowid": next_chunk},
                        )

                yield self.postgres_store.execute(insert)

                postgres_size += len(rows)

                self.progress.update(table, postgres_size)
            else:
                return

    def setup_db(self, db_config, database_engine):
        db_conn = database_engine.module.connect(
            **{
                k: v for k, v in db_config.get("args", {}).items()
                if not k.startswith("cp_")
            }
        )

        database_engine.prepare_database(db_conn)

        db_conn.commit()

    @defer.inlineCallbacks
    def run(self):
        try:
            sqlite_db_pool = adbapi.ConnectionPool(
                self.sqlite_config["name"],
                **self.sqlite_config["args"]
            )

            postgres_db_pool = adbapi.ConnectionPool(
                self.postgres_config["name"],
                **self.postgres_config["args"]
            )

            sqlite_engine = create_engine("sqlite3")
            postgres_engine = create_engine("psycopg2")

            self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
            self.postgres_store = Store(postgres_db_pool, postgres_engine)

            yield self.postgres_store.execute(
                postgres_engine.check_database
            )

            # Step 1. Set up databases.
            self.progress.set_state("Preparing SQLite3")
            self.setup_db(sqlite_config, sqlite_engine)

            self.progress.set_state("Preparing PostgreSQL")
            self.setup_db(postgres_config, postgres_engine)

            # Step 2. Get tables.
            self.progress.set_state("Fetching tables")
            sqlite_tables = yield self.sqlite_store._simple_select_onecol(
                table="sqlite_master",
                keyvalues={
                    "type": "table",
                },
                retcol="name",
            )

            postgres_tables = yield self.postgres_store._simple_select_onecol(
                table="information_schema.tables",
                keyvalues={
                    "table_schema": "public",
                },
                retcol="distinct table_name",
            )

            tables = set(sqlite_tables) & set(postgres_tables)

            self.progress.set_state("Creating tables")

            logger.info("Found %d tables", len(tables))

            def create_port_table(txn):
                txn.execute(
                    "CREATE TABLE port_from_sqlite3 ("
                    " table_name varchar(100) NOT NULL UNIQUE,"
                    " rowid bigint NOT NULL"
                    ")"
                )

            try:
                yield self.postgres_store.runInteraction(
                    "create_port_table", create_port_table
                )
            except Exception as e:
                logger.info("Failed to create port table: %s", e)

            self.progress.set_state("Setting up")

            # Set up tables.
            setup_res = yield defer.gatherResults(
                [
                    self.setup_table(table)
                    for table in tables
                    if table not in ["schema_version", "applied_schema_deltas"]
                    and not table.startswith("sqlite_")
                ],
                consumeErrors=True,
            )

            # Process tables.
            yield defer.gatherResults(
                [
                    self.handle_table(*res)
                    for res in setup_res
                ],
                consumeErrors=True,
            )

            self.progress.done()
        except:
            global end_error_exec_info
            end_error_exec_info = sys.exc_info()
            logger.exception("")
        finally:
            reactor.stop()

    def _convert_rows(self, table, headers, rows):
        bool_col_names = BOOLEAN_COLUMNS.get(table, [])

        bool_cols = [
            i for i, h in enumerate(headers) if h in bool_col_names
        ]

        def conv(j, col):
            if j in bool_cols:
                return bool(col)
            return col

        for i, row in enumerate(rows):
            rows[i] = tuple(
                conv(j, col)
                for j, col in enumerate(row)
                if j > 0
            )

    @defer.inlineCallbacks
    def _setup_sent_transactions(self):
        # Only save things from the last day
        yesterday = int(time.time()*1000) - 86400000

        # And save the max transaction id from each destination
        select = (
            "SELECT rowid, * FROM sent_transactions WHERE rowid IN ("
            "SELECT max(rowid) FROM sent_transactions"
            " GROUP BY destination"
            ")"
        )

        def r(txn):
            txn.execute(select)
            rows = txn.fetchall()
            headers = [column[0] for column in txn.description]

            ts_ind = headers.index('ts')

            return headers, [r for r in rows if r[ts_ind] < yesterday]

        headers, rows = yield self.sqlite_store.runInteraction(
            "select", r,
        )

        self._convert_rows("sent_transactions", headers, rows)

        inserted_rows = len(rows)
        if inserted_rows:
            max_inserted_rowid = max(r[0] for r in rows)

            def insert(txn):
                self.postgres_store.insert_many_txn(
                    txn, "sent_transactions", headers[1:], rows
                )

            yield self.postgres_store.execute(insert)
        else:
            max_inserted_rowid = 0

        def get_start_id(txn):
            txn.execute(
                "SELECT rowid FROM sent_transactions WHERE ts >= ?"
                " ORDER BY rowid ASC LIMIT 1",
                (yesterday,)
            )

            rows = txn.fetchall()
            if rows:
                return rows[0][0]
            else:
                return 1

        next_chunk = yield self.sqlite_store.execute(get_start_id)
        next_chunk = max(max_inserted_rowid + 1, next_chunk)

        yield self.postgres_store._simple_insert(
            table="port_from_sqlite3",
            values={"table_name": "sent_transactions", "rowid": next_chunk}
        )

        def get_sent_table_size(txn):
            txn.execute(
                "SELECT count(*) FROM sent_transactions"
                " WHERE ts >= ?",
                (yesterday,)
            )
            size, = txn.fetchone()
            return int(size)

        remaining_count = yield self.sqlite_store.execute(
            get_sent_table_size
        )

        total_count = remaining_count + inserted_rows

        defer.returnValue((next_chunk, inserted_rows, total_count))

    @defer.inlineCallbacks
    def _get_remaining_count_to_port(self, table, next_chunk):
        rows = yield self.sqlite_store.execute_sql(
            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
            next_chunk,
        )

        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_already_ported_count(self, table):
        rows = yield self.postgres_store.execute_sql(
            "SELECT count(*) FROM %s" % (table,),
        )

        defer.returnValue(rows[0][0])

    @defer.inlineCallbacks
    def _get_total_count_to_port(self, table, next_chunk):
        remaining, done = yield defer.gatherResults(
            [
                self._get_remaining_count_to_port(table, next_chunk),
                self._get_already_ported_count(table),
            ],
            consumeErrors=True,
        )

        remaining = int(remaining) if remaining else 0
        done = int(done) if done else 0

        defer.returnValue((done, remaining + done))


##############################################
###### The following is simply UI stuff ######
##############################################


class Progress(object):
    """Used to report progress of the port
    """
    def __init__(self):
        self.tables = {}

        self.start_time = int(time.time())

    def add_table(self, table, cur, size):
        self.tables[table] = {
            "start": cur,
            "num_done": cur,
            "total": size,
            "perc": int(cur * 100 / size),
        }

    def update(self, table, num_done):
        data = self.tables[table]
        data["num_done"] = num_done
        data["perc"] = int(num_done * 100 / data["total"])

    def done(self):
        pass


class CursesProgress(Progress):
    """Reports progress to a curses window
    """
    def __init__(self, stdscr):
        self.stdscr = stdscr

        curses.use_default_colors()
        curses.curs_set(0)

        curses.init_pair(1, curses.COLOR_RED, -1)
        curses.init_pair(2, curses.COLOR_GREEN, -1)

        self.last_update = 0
|
self.finished = False
|
||||||
|
|
||||||
|
self.total_processed = 0
|
||||||
|
self.total_remaining = 0
|
||||||
|
|
||||||
|
super(CursesProgress, self).__init__()
|
||||||
|
|
||||||
|
def update(self, table, num_done):
|
||||||
|
super(CursesProgress, self).update(table, num_done)
|
||||||
|
|
||||||
|
self.total_processed = 0
|
||||||
|
self.total_remaining = 0
|
||||||
|
for table, data in self.tables.items():
|
||||||
|
self.total_processed += data["num_done"] - data["start"]
|
||||||
|
self.total_remaining += data["total"] - data["num_done"]
|
||||||
|
|
||||||
|
self.render()
|
||||||
|
|
||||||
|
def render(self, force=False):
|
||||||
|
now = time.time()
|
||||||
|
|
||||||
|
if not force and now - self.last_update < 0.2:
|
||||||
|
# reactor.callLater(1, self.render)
|
||||||
|
return
|
||||||
|
|
||||||
|
self.stdscr.clear()
|
||||||
|
|
||||||
|
rows, cols = self.stdscr.getmaxyx()
|
||||||
|
|
||||||
|
duration = int(now) - int(self.start_time)
|
||||||
|
|
||||||
|
minutes, seconds = divmod(duration, 60)
|
||||||
|
duration_str = '%02dm %02ds' % (minutes, seconds,)
|
||||||
|
|
||||||
|
if self.finished:
|
||||||
|
status = "Time spent: %s (Done!)" % (duration_str,)
|
||||||
|
else:
|
||||||
|
|
||||||
|
if self.total_processed > 0:
|
||||||
|
left = float(self.total_remaining) / self.total_processed
|
||||||
|
|
||||||
|
est_remaining = (int(now) - self.start_time) * left
|
||||||
|
est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
|
||||||
|
else:
|
||||||
|
est_remaining_str = "Unknown"
|
||||||
|
status = (
|
||||||
|
"Time spent: %s (est. remaining: %s)"
|
||||||
|
% (duration_str, est_remaining_str,)
|
||||||
|
)
|
||||||
|
|
||||||
|
self.stdscr.addstr(
|
||||||
|
0, 0,
|
||||||
|
status,
|
||||||
|
curses.A_BOLD,
|
||||||
|
)
|
||||||
|
|
||||||
|
max_len = max([len(t) for t in self.tables.keys()])
|
||||||
|
|
||||||
|
left_margin = 5
|
||||||
|
middle_space = 1
|
||||||
|
|
||||||
|
items = self.tables.items()
|
||||||
|
items.sort(
|
||||||
|
key=lambda i: (i[1]["perc"], i[0]),
|
||||||
|
)
|
||||||
|
|
||||||
|
for i, (table, data) in enumerate(items):
|
||||||
|
if i + 2 >= rows:
|
||||||
|
break
|
||||||
|
|
||||||
|
perc = data["perc"]
|
||||||
|
|
||||||
|
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
|
||||||
|
|
||||||
|
self.stdscr.addstr(
|
||||||
|
i+2, left_margin + max_len - len(table),
|
||||||
|
table,
|
||||||
|
curses.A_BOLD | color,
|
||||||
|
)
|
||||||
|
|
||||||
|
size = 20
|
||||||
|
|
||||||
|
progress = "[%s%s]" % (
|
||||||
|
"#" * int(perc*size/100),
|
||||||
|
" " * (size - int(perc*size/100)),
|
||||||
|
)
|
||||||
|
|
||||||
|
self.stdscr.addstr(
|
||||||
|
i+2, left_margin + max_len + middle_space,
|
||||||
|
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.finished:
|
||||||
|
self.stdscr.addstr(
|
||||||
|
rows-1, 0,
|
||||||
|
"Press any key to exit...",
|
||||||
|
)
|
||||||
|
|
||||||
|
self.stdscr.refresh()
|
||||||
|
self.last_update = time.time()
|
||||||
|
|
||||||
|
def done(self):
|
||||||
|
self.finished = True
|
||||||
|
self.render(True)
|
||||||
|
self.stdscr.getch()
|
||||||
|
|
||||||
|
def set_state(self, state):
|
||||||
|
self.stdscr.clear()
|
||||||
|
self.stdscr.addstr(
|
||||||
|
0, 0,
|
||||||
|
state + "...",
|
||||||
|
curses.A_BOLD,
|
||||||
|
)
|
||||||
|
self.stdscr.refresh()
|
||||||
|
|
||||||
|
|
||||||
|
class TerminalProgress(Progress):
|
||||||
|
"""Just prints progress to the terminal
|
||||||
|
"""
|
||||||
|
def update(self, table, num_done):
|
||||||
|
super(TerminalProgress, self).update(table, num_done)
|
||||||
|
|
||||||
|
data = self.tables[table]
|
||||||
|
|
||||||
|
print "%s: %d%% (%d/%d)" % (
|
||||||
|
table, data["perc"],
|
||||||
|
data["num_done"], data["total"],
|
||||||
|
)
|
||||||
|
|
||||||
|
def set_state(self, state):
|
||||||
|
print state + "..."
|
||||||
|
|
||||||
|
|
||||||
|
##############################################
|
||||||
|
##############################################
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="A script to port an existing synapse SQLite database to"
|
||||||
|
" a new PostgreSQL database."
|
||||||
|
)
|
||||||
|
parser.add_argument("-v", action='store_true')
|
||||||
|
parser.add_argument(
|
||||||
|
"--sqlite-database", required=True,
|
||||||
|
help="The snapshot of the SQLite database file. This must not be"
|
||||||
|
" currently used by a running synapse server"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--postgres-config", type=argparse.FileType('r'), required=True,
|
||||||
|
help="The database config file for the PostgreSQL database"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--curses", action='store_true',
|
||||||
|
help="display a curses based progress UI"
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--batch-size", type=int, default=1000,
|
||||||
|
help="The number of rows to select from the SQLite table each"
|
||||||
|
" iteration [default=1000]",
|
||||||
|
)
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
logging_config = {
|
||||||
|
"level": logging.DEBUG if args.v else logging.INFO,
|
||||||
|
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.curses:
|
||||||
|
logging_config["filename"] = "port-synapse.log"
|
||||||
|
|
||||||
|
logging.basicConfig(**logging_config)
|
||||||
|
|
||||||
|
sqlite_config = {
|
||||||
|
"name": "sqlite3",
|
||||||
|
"args": {
|
||||||
|
"database": args.sqlite_database,
|
||||||
|
"cp_min": 1,
|
||||||
|
"cp_max": 1,
|
||||||
|
"check_same_thread": False,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
postgres_config = yaml.safe_load(args.postgres_config)
|
||||||
|
|
||||||
|
if "database" in postgres_config:
|
||||||
|
postgres_config = postgres_config["database"]
|
||||||
|
|
||||||
|
if "name" not in postgres_config:
|
||||||
|
sys.stderr.write("Malformed database config: no 'name'")
|
||||||
|
sys.exit(2)
|
||||||
|
if postgres_config["name"] != "psycopg2":
|
||||||
|
sys.stderr.write("Database must use 'psycopg2' connector.")
|
||||||
|
sys.exit(3)
|
||||||
|
|
||||||
|
def start(stdscr=None):
|
||||||
|
if stdscr:
|
||||||
|
progress = CursesProgress(stdscr)
|
||||||
|
else:
|
||||||
|
progress = TerminalProgress()
|
||||||
|
|
||||||
|
porter = Porter(
|
||||||
|
sqlite_config=sqlite_config,
|
||||||
|
postgres_config=postgres_config,
|
||||||
|
progress=progress,
|
||||||
|
batch_size=args.batch_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
reactor.callWhenRunning(porter.run)
|
||||||
|
|
||||||
|
reactor.run()
|
||||||
|
|
||||||
|
if args.curses:
|
||||||
|
curses.wrapper(start)
|
||||||
|
else:
|
||||||
|
start()
|
||||||
|
|
||||||
|
if end_error_exec_info:
|
||||||
|
exc_type, exc_value, exc_traceback = end_error_exec_info
|
||||||
|
traceback.print_exception(exc_type, exc_value, exc_traceback)
|
||||||
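The porter only talks to its UI through the small Progress interface above (add_table, update, set_state, done). As a rough sketch of how that contract could be extended — the class name here is hypothetical, not part of the script — a reporter that writes to the module's logger instead of the terminal or a curses window:

# Hypothetical reporter built on the Progress interface above; it reuses
# the bookkeeping in Progress.update() and only changes how progress is shown.
class LoggingProgress(Progress):
    def update(self, table, num_done):
        super(LoggingProgress, self).update(table, num_done)
        data = self.tables[table]
        logger.info(
            "%s: %d%% (%d/%d)",
            table, data["perc"], data["num_done"], data["total"],
        )

    def set_state(self, state):
        logger.info("%s...", state)

Wiring it in would only need the start() helper above to construct a LoggingProgress instead of a TerminalProgress.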
@@ -1,331 +0,0 @@
from synapse.storage import SCHEMA_VERSION, read_schema
from synapse.storage._base import SQLBaseStore
from synapse.storage.signatures import SignatureStore
from synapse.storage.event_federation import EventFederationStore

from syutil.base64util import encode_base64, decode_base64

from synapse.crypto.event_signing import compute_event_signature

from synapse.events.builder import EventBuilder
from synapse.events.utils import prune_event

from synapse.crypto.event_signing import check_event_content_hash

from syutil.crypto.jsonsign import (
    verify_signed_json, SignatureVerifyException,
)
from syutil.crypto.signing_key import decode_verify_key_bytes

from syutil.jsonutil import encode_canonical_json

import argparse
# import dns.resolver
import hashlib
import httplib
import json
import sqlite3
import syutil
import urllib2


delta_sql = """
CREATE TABLE IF NOT EXISTS event_json(
    event_id TEXT NOT NULL,
    room_id TEXT NOT NULL,
    internal_metadata NOT NULL,
    json BLOB NOT NULL,
    CONSTRAINT ev_j_uniq UNIQUE (event_id)
);

CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id);
CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id);

PRAGMA user_version = 10;
"""


class Store(object):
    _get_event_signatures_txn = SignatureStore.__dict__["_get_event_signatures_txn"]
    _get_event_content_hashes_txn = SignatureStore.__dict__["_get_event_content_hashes_txn"]
    _get_event_reference_hashes_txn = SignatureStore.__dict__["_get_event_reference_hashes_txn"]
    _get_prev_event_hashes_txn = SignatureStore.__dict__["_get_prev_event_hashes_txn"]
    _get_prev_events_and_state = EventFederationStore.__dict__["_get_prev_events_and_state"]
    _get_auth_events = EventFederationStore.__dict__["_get_auth_events"]
    cursor_to_dict = SQLBaseStore.__dict__["cursor_to_dict"]
    _simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
    _simple_select_list_txn = SQLBaseStore.__dict__["_simple_select_list_txn"]
    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]

    def _generate_event_json(self, txn, rows):
        events = []
        for row in rows:
            d = dict(row)

            d.pop("stream_ordering", None)
            d.pop("topological_ordering", None)
            d.pop("processed", None)

            if "origin_server_ts" not in d:
                d["origin_server_ts"] = d.pop("ts", 0)
            else:
                d.pop("ts", 0)

            d.pop("prev_state", None)
            d.update(json.loads(d.pop("unrecognized_keys")))

            d["sender"] = d.pop("user_id")

            d["content"] = json.loads(d["content"])

            if "age_ts" not in d:
                # For compatibility
                d["age_ts"] = d.get("origin_server_ts", 0)

            d.setdefault("unsigned", {})["age_ts"] = d.pop("age_ts")

            outlier = d.pop("outlier", False)

            # d.pop("membership", None)

            d.pop("state_hash", None)

            d.pop("replaces_state", None)

            b = EventBuilder(d)
            b.internal_metadata.outlier = outlier

            events.append(b)

        for i, ev in enumerate(events):
            signatures = self._get_event_signatures_txn(
                txn, ev.event_id,
            )

            ev.signatures = {
                n: {
                    k: encode_base64(v) for k, v in s.items()
                }
                for n, s in signatures.items()
            }

            hashes = self._get_event_content_hashes_txn(
                txn, ev.event_id,
            )

            ev.hashes = {
                k: encode_base64(v) for k, v in hashes.items()
            }

            prevs = self._get_prev_events_and_state(txn, ev.event_id)

            ev.prev_events = [
                (e_id, h)
                for e_id, h, is_state in prevs
                if is_state == 0
            ]

            # ev.auth_events = self._get_auth_events(txn, ev.event_id)

            hashes = dict(ev.auth_events)

            for e_id, hash in ev.prev_events:
                if e_id in hashes and not hash:
                    hash.update(hashes[e_id])
            #
            # if hasattr(ev, "state_key"):
            #     ev.prev_state = [
            #         (e_id, h)
            #         for e_id, h, is_state in prevs
            #         if is_state == 1
            #     ]

        return [e.build() for e in events]


store = Store()


# def get_key(server_name):
#     print "Getting keys for: %s" % (server_name,)
#     targets = []
#     if ":" in server_name:
#         target, port = server_name.split(":")
#         targets.append((target, int(port)))
#     try:
#         answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
#         for srv in answers:
#             targets.append((srv.target, srv.port))
#     except dns.resolver.NXDOMAIN:
#         targets.append((server_name, 8448))
#     except:
#         print "Failed to lookup keys for %s" % (server_name,)
#         return {}
#
#     for target, port in targets:
#         url = "https://%s:%i/_matrix/key/v1" % (target, port)
#         try:
#             keys = json.load(urllib2.urlopen(url, timeout=2))
#             verify_keys = {}
#             for key_id, key_base64 in keys["verify_keys"].items():
#                 verify_key = decode_verify_key_bytes(
#                     key_id, decode_base64(key_base64)
#                 )
#                 verify_signed_json(keys, server_name, verify_key)
#                 verify_keys[key_id] = verify_key
#             print "Got keys for: %s" % (server_name,)
#             return verify_keys
#         except urllib2.URLError:
#             pass
#         except urllib2.HTTPError:
#             pass
#         except httplib.HTTPException:
#             pass
#
#     print "Failed to get keys for %s" % (server_name,)
#     return {}


def reinsert_events(cursor, server_name, signing_key):
    print "Running delta: v10"

    cursor.executescript(delta_sql)

    cursor.execute(
        "SELECT * FROM events ORDER BY rowid ASC"
    )

    print "Getting events..."

    rows = store.cursor_to_dict(cursor)

    events = store._generate_event_json(cursor, rows)

    print "Got events from DB."

    algorithms = {
        "sha256": hashlib.sha256,
    }

    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
    verify_key = signing_key.verify_key
    verify_key.alg = signing_key.alg
    verify_key.version = signing_key.version

    server_keys = {
        server_name: {
            key_id: verify_key
        }
    }

    i = 0
    N = len(events)

    for event in events:
        if i % 100 == 0:
            print "Processed: %d/%d events" % (i,N,)
        i += 1

        # for alg_name in event.hashes:
        #     if check_event_content_hash(event, algorithms[alg_name]):
        #         pass
        #     else:
        #         pass
        #         print "FAIL content hash %s %s" % (alg_name, event.event_id, )

        have_own_correctly_signed = False
        for host, sigs in event.signatures.items():
            pruned = prune_event(event)

            for key_id in sigs:
                if host not in server_keys:
                    server_keys[host] = {}  # get_key(host)
                if key_id in server_keys[host]:
                    try:
                        verify_signed_json(
                            pruned.get_pdu_json(),
                            host,
                            server_keys[host][key_id]
                        )

                        if host == server_name:
                            have_own_correctly_signed = True
                    except SignatureVerifyException:
                        print "FAIL signature check %s %s" % (
                            key_id, event.event_id
                        )

        # TODO: Re sign with our own server key
        if not have_own_correctly_signed:
            sigs = compute_event_signature(event, server_name, signing_key)
            event.signatures.update(sigs)

            pruned = prune_event(event)

            for key_id in event.signatures[server_name]:
                verify_signed_json(
                    pruned.get_pdu_json(),
                    server_name,
                    server_keys[server_name][key_id]
                )

        event_json = encode_canonical_json(
            event.get_dict()
        ).decode("UTF-8")

        metadata_json = encode_canonical_json(
            event.internal_metadata.get_dict()
        ).decode("UTF-8")

        store._simple_insert_txn(
            cursor,
            table="event_json",
            values={
                "event_id": event.event_id,
                "room_id": event.room_id,
                "internal_metadata": metadata_json,
                "json": event_json,
            },
            or_replace=True,
        )


def main(database, server_name, signing_key):
    conn = sqlite3.connect(database)
    cursor = conn.cursor()

    # Do other deltas:
    cursor.execute("PRAGMA user_version")
    row = cursor.fetchone()

    if row and row[0]:
        user_version = row[0]
        # Run every version since after the current version.
        for v in range(user_version + 1, 10):
            print "Running delta: %d" % (v,)
            sql_script = read_schema("delta/v%d" % (v,))
            cursor.executescript(sql_script)

    reinsert_events(cursor, server_name, signing_key)

    conn.commit()

    print "Success!"


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("database")
    parser.add_argument("server_name")
    parser.add_argument(
        "signing_key", type=argparse.FileType('r'),
    )
    args = parser.parse_args()

    signing_key = syutil.crypto.signing_key.read_signing_keys(
        args.signing_key
    )

    main(args.database, args.server_name, signing_key[0])
@@ -3,9 +3,6 @@ source-dir = docs/sphinx
 build-dir = docs/build
 all_files = 1
 
-[aliases]
-test = trial
-
 [trial]
 test_suite = tests
 
@@ -16,3 +13,7 @@ ignore =
     docs/*
     pylint.cfg
    tox.ini
+
+[flake8]
+max-line-length = 90
+ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
setup.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright 2014 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import glob
 import os
-from setuptools import setup, find_packages
+from setuptools import setup, find_packages, Command
+import sys
 
 
 here = os.path.abspath(os.path.dirname(__file__))
@@ -36,6 +38,39 @@ def exec_file(path_segments):
     exec(code, result)
     return result
 
+
+class Tox(Command):
+    user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
+
+    def initialize_options(self):
+        self.tox_args = None
+
+    def finalize_options(self):
+        self.test_args = []
+        self.test_suite = True
+
+    def run(self):
+        #import here, cause outside the eggs aren't loaded
+        try:
+            import tox
+        except ImportError:
+            try:
+                self.distribution.fetch_build_eggs("tox")
+                import tox
+            except:
+                raise RuntimeError(
+                    "The tests need 'tox' to run. Please install 'tox'."
+                )
+        import shlex
+        args = self.tox_args
+        if args:
+            args = shlex.split(self.tox_args)
+        else:
+            args = []
+        errno = tox.cmdline(args=args)
+        sys.exit(errno)
+
 
 version = exec_file(("synapse", "__init__.py"))["__version__"]
 dependencies = exec_file(("synapse", "python_dependencies.py"))
 long_description = read_file(("README.rst",))
@@ -45,15 +80,11 @@ setup(
     version=version,
     packages=find_packages(exclude=["tests", "tests.*"]),
     description="Reference Synapse Home Server",
-    install_requires=dependencies["REQUIREMENTS"].keys(),
-    setup_requires=[
-        "Twisted==14.0.2", # Here to override setuptools_trial's dependency on Twisted>=2.4.0
-        "setuptools_trial",
-        "mock"
-    ],
-    dependency_links=dependencies["DEPENDENCY_LINKS"],
+    install_requires=dependencies['requirements'](include_conditional=True).keys(),
+    dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
     include_package_data=True,
     zip_safe=False,
     long_description=long_description,
-    scripts=["synctl"],
+    scripts=["synctl"] + glob.glob("scripts/*"),
+    cmdclass={'test': Tox},
 )
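For context on the new Tox command class: setuptools hands it the raw --tox-args string, which run() splits with shlex before calling tox, mirroring the lines in the diff above. A hedged sketch of the equivalent call (the option string is only an example):

# What `python setup.py test --tox-args="-e py27"` boils down to, roughly:
import shlex
import tox

args = shlex.split("-e py27")   # parsed from the --tox-args option
errno = tox.cmdline(args=args)  # setup.py passes this exit status to sys.exit()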
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.7.0f"
+__version__ = "0.13.2"
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014 - 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,33 +14,60 @@
 # limitations under the License.
 
 """This module contains classes for authenticating the user."""
+from canonicaljson import encode_canonical_json
+from signedjson.key import decode_verify_key_bytes
+from signedjson.sign import verify_signed_json, SignatureVerifyException
+
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes, Membership, JoinRules
-from synapse.api.errors import AuthError, StoreError, Codes, SynapseError
+from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
+from synapse.types import Requester, RoomID, UserID, EventID
 from synapse.util.logutils import log_function
-from synapse.util.async import run_on_reactor
-from synapse.types import UserID, ClientInfo
+from synapse.util.logcontext import preserve_context_over_fn
+from unpaddedbase64 import decode_base64
 
 import logging
+import pymacaroons
 
 logger = logging.getLogger(__name__)
 
 
+AuthEventTypes = (
+    EventTypes.Create, EventTypes.Member, EventTypes.PowerLevels,
+    EventTypes.JoinRules, EventTypes.RoomHistoryVisibility,
+    EventTypes.ThirdPartyInvite,
+)
+
+
 class Auth(object):
 
     def __init__(self, hs):
         self.hs = hs
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
+        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
+        self._KNOWN_CAVEAT_PREFIXES = set([
+            "gen = ",
+            "guest = ",
+            "type = ",
+            "time < ",
+            "user_id = ",
+        ])
 
     def check(self, event, auth_events):
         """ Checks if this event is correctly authed.
 
+        Args:
+            event: the event being checked.
+            auth_events (dict: event-key -> event): the existing room state.
+
+
         Returns:
             True if the auth checks pass.
         """
+        self.check_size_limits(event)
+
         try:
             if not hasattr(event, "room_id"):
                 raise AuthError(500, "Event has no room_id: %s" % event)
@@ -54,11 +81,31 @@ class Auth(object):
                 # FIXME
                 return True
 
+            creation_event = auth_events.get((EventTypes.Create, ""), None)
+
+            if not creation_event:
+                raise SynapseError(
+                    403,
+                    "Room %r does not exist" % (event.room_id,)
+                )
+
+            creating_domain = RoomID.from_string(event.room_id).domain
+            originating_domain = UserID.from_string(event.sender).domain
+            if creating_domain != originating_domain:
+                if not self.can_federate(event, auth_events):
+                    raise AuthError(
+                        403,
+                        "This room has been marked as unfederatable."
+                    )
+
             # FIXME: Temp hack
             if event.type == EventTypes.Aliases:
                 return True
 
-            logger.debug("Auth events: %s", auth_events)
+            logger.debug(
+                "Auth events: %s",
+                [a.event_id for a in auth_events.values()]
+            )
 
             if event.type == EventTypes.Member:
                 allowed = self.is_membership_change_allowed(
@@ -77,7 +124,7 @@ class Auth(object):
                 self._check_power_levels(event, auth_events)
 
             if event.type == EventTypes.Redaction:
-                self._check_redaction(event, auth_events)
+                self.check_redaction(event, auth_events)
 
             logger.debug("Allowing! %s", event)
         except AuthError as e:
@@ -88,8 +135,39 @@ class Auth(object):
             logger.info("Denying! %s", event)
             raise
 
+    def check_size_limits(self, event):
+        def too_big(field):
+            raise EventSizeError("%s too large" % (field,))
+
+        if len(event.user_id) > 255:
+            too_big("user_id")
+        if len(event.room_id) > 255:
+            too_big("room_id")
+        if event.is_state() and len(event.state_key) > 255:
+            too_big("state_key")
+        if len(event.type) > 255:
+            too_big("type")
+        if len(event.event_id) > 255:
+            too_big("event_id")
+        if len(encode_canonical_json(event.get_pdu_json())) > 65536:
+            too_big("event")
+
     @defer.inlineCallbacks
     def check_joined_room(self, room_id, user_id, current_state=None):
+        """Check if the user is currently joined in the room
+        Args:
+            room_id(str): The room to check.
+            user_id(str): The user to check.
+            current_state(dict): Optional map of the current state of the room.
+                If provided then that map is used to check whether they are a
+                member of the room. Otherwise the current membership is
+                loaded from the database.
+        Raises:
+            AuthError if the user is not in the room.
+        Returns:
+            A deferred membership event for the user if the user is in
+            the room.
+        """
         if current_state:
             member = current_state.get(
                 (EventTypes.Member, user_id),
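The 65536-byte cap that check_size_limits introduces is measured on the canonical JSON encoding of the event's PDU form, per the encode_canonical_json call above. A minimal sketch with a stand-in payload (the dict here is illustrative only, not a real event):

from canonicaljson import encode_canonical_json

# Stand-in for event.get_pdu_json(); real events carry many more fields.
pdu = {"type": "m.room.message", "room_id": "!abc:example.com"}
assert len(encode_canonical_json(pdu)) <= 65536  # the overall limit enforced above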
@@ -105,6 +183,40 @@ class Auth(object):
             self._check_joined_room(member, user_id, room_id)
         defer.returnValue(member)
 
+    @defer.inlineCallbacks
+    def check_user_was_in_room(self, room_id, user_id):
+        """Check if the user was in the room at some point.
+        Args:
+            room_id(str): The room to check.
+            user_id(str): The user to check.
+        Raises:
+            AuthError if the user was never in the room.
+        Returns:
+            A deferred membership event for the user if the user was in the
+            room. This will be the join event if they are currently joined to
+            the room. This will be the leave event if they have left the room.
+        """
+        member = yield self.state.get_current_state(
+            room_id=room_id,
+            event_type=EventTypes.Member,
+            state_key=user_id
+        )
+        membership = member.membership if member else None
+
+        if membership not in (Membership.JOIN, Membership.LEAVE):
+            raise AuthError(403, "User %s not in room %s" % (
+                user_id, room_id
+            ))
+
+        if membership == Membership.LEAVE:
+            forgot = yield self.store.did_forget(user_id, room_id)
+            if forgot:
+                raise AuthError(403, "User %s not in room %s" % (
+                    user_id, room_id
+                ))
+
+        defer.returnValue(member)
+
     @defer.inlineCallbacks
     def check_host_in_room(self, room_id, host):
         curr_state = yield self.state.get_current_state(room_id)
@@ -139,6 +251,11 @@ class Auth(object):
             user_id, room_id, repr(member)
         ))
 
+    def can_federate(self, event, auth_events):
+        creation_event = auth_events.get((EventTypes.Create, ""))
+
+        return creation_event.content.get("m.federate", True) is True
+
     @log_function
     def is_membership_change_allowed(self, event, auth_events):
         membership = event.content["membership"]
@@ -154,6 +271,15 @@ class Auth(object):
 
         target_user_id = event.state_key
 
+        creating_domain = RoomID.from_string(event.room_id).domain
+        target_domain = UserID.from_string(target_user_id).domain
+        if creating_domain != target_domain:
+            if not self.can_federate(event, auth_events):
+                raise AuthError(
+                    403,
+                    "This room has been marked as unfederatable."
+                )
+
         # get info about the caller
         key = (EventTypes.Member, event.user_id, )
         caller = auth_events.get(key)
@@ -166,6 +292,7 @@ class Auth(object):
         target = auth_events.get(key)
 
         target_in_room = target and target.membership == Membership.JOIN
+        target_banned = target and target.membership == Membership.BAN
 
         key = (EventTypes.JoinRules, "", )
         join_rule_event = auth_events.get(key)
@@ -176,24 +303,20 @@ class Auth(object):
         else:
             join_rule = JoinRules.INVITE
 
-        user_level = self._get_power_level_from_event_state(
-            event,
-            event.user_id,
-            auth_events,
-        )
-
-        ban_level, kick_level, redact_level = (
-            self._get_ops_level_from_event_state(
-                event,
-                auth_events,
-            )
-        )
+        user_level = self._get_user_power_level(event.user_id, auth_events)
+        target_level = self._get_user_power_level(
+            target_user_id, auth_events
+        )
+
+        # FIXME (erikj): What should we do here as the default?
+        ban_level = self._get_named_level(auth_events, "ban", 50)
 
         logger.debug(
             "is_membership_change_allowed: %s",
             {
                 "caller_in_room": caller_in_room,
                 "caller_invited": caller_invited,
+                "target_banned": target_banned,
                 "target_in_room": target_in_room,
                 "membership": membership,
                 "join_rule": join_rule,
@@ -202,25 +325,50 @@ class Auth(object):
             }
         )
 
-        if Membership.INVITE == membership:
-            # TODO (erikj): We should probably handle this more intelligently
-            # PRIVATE join rules.
+        if Membership.INVITE == membership and "third_party_invite" in event.content:
+            if not self._verify_third_party_invite(event, auth_events):
+                raise AuthError(403, "You are not invited to this room.")
+            return True
+
+        if Membership.JOIN != membership:
+            if (caller_invited
+                    and Membership.LEAVE == membership
+                    and target_user_id == event.user_id):
+                return True
 
-            # Invites are valid iff caller is in the room and target isn't.
             if not caller_in_room:  # caller isn't joined
                 raise AuthError(
                     403,
                     "%s not in room %s." % (event.user_id, event.room_id,)
                 )
 
+        if Membership.INVITE == membership:
+            # TODO (erikj): We should probably handle this more intelligently
+            # PRIVATE join rules.
+
+            # Invites are valid iff caller is in the room and target isn't.
+            if target_banned:
+                raise AuthError(
+                    403, "%s is banned from the room" % (target_user_id,)
+                )
             elif target_in_room:  # the target is already in the room.
                 raise AuthError(403, "%s is already in the room." %
                                 target_user_id)
+            else:
+                invite_level = self._get_named_level(auth_events, "invite", 0)
+
+                if user_level < invite_level:
+                    raise AuthError(
+                        403, "You cannot invite user %s." % target_user_id
+                    )
         elif Membership.JOIN == membership:
             # Joins are valid iff caller == target and they were:
             # invited: They are accepting the invitation
             # joined: It's a NOOP
             if event.user_id != target_user_id:
                 raise AuthError(403, "Cannot force another user to join.")
+            elif target_banned:
+                raise AuthError(403, "You are banned from this room")
             elif join_rule == JoinRules.PUBLIC:
                 pass
             elif join_rule == JoinRules.INVITE:
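The new _get_user_power_level and _get_named_level helpers read everything from the m.room.power_levels state event's content. As a sketch, a content body like the following (example values; the only defaults fixed by the code above are ban 50, kick 50 and invite 0) would give the admin level 100 and everyone else the users_default:

# Illustrative m.room.power_levels content consumed by the helpers above.
power_levels_content = {
    "users": {"@admin:example.com": 100},  # per-user overrides
    "users_default": 0,                    # _get_user_power_level fallback
    "ban": 50,                             # read via _get_named_level(auth_events, "ban", 50)
    "kick": 50,
    "invite": 0,
}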
@@ -232,84 +380,149 @@ class Auth(object):
                 raise AuthError(403, "You are not allowed to join this room")
         elif Membership.LEAVE == membership:
             # TODO (erikj): Implement kicks.
-            if not caller_in_room:  # trying to leave a room you aren't joined
+            if target_banned and user_level < ban_level:
                 raise AuthError(
-                    403,
-                    "%s not in room %s." % (target_user_id, event.room_id,)
+                    403, "You cannot unban user &s." % (target_user_id,)
                 )
             elif target_user_id != event.user_id:
-                if kick_level:
-                    kick_level = int(kick_level)
-                else:
-                    kick_level = 50  # FIXME (erikj): What should we do here?
+                kick_level = self._get_named_level(auth_events, "kick", 50)
 
-                if user_level < kick_level:
+                if user_level < kick_level or user_level <= target_level:
                     raise AuthError(
                         403, "You cannot kick user %s." % target_user_id
                     )
         elif Membership.BAN == membership:
-            if ban_level:
-                ban_level = int(ban_level)
-            else:
-                ban_level = 50  # FIXME (erikj): What should we do here?
-
-            if user_level < ban_level:
+            if user_level < ban_level or user_level <= target_level:
                 raise AuthError(403, "You don't have permission to ban")
         else:
             raise AuthError(500, "Unknown membership %s" % membership)
 
         return True
 
-    def _get_power_level_from_event_state(self, event, user_id, auth_events):
+    def _verify_third_party_invite(self, event, auth_events):
+        """
+        Validates that the invite event is authorized by a previous third-party invite.
+
+        Checks that the public key, and keyserver, match those in the third party invite,
+        and that the invite event has a signature issued using that public key.
+
+        Args:
+            event: The m.room.member join event being validated.
+            auth_events: All relevant previous context events which may be used
+                for authorization decisions.
+
+        Return:
+            True if the event fulfills the expectations of a previous third party
+            invite event.
+        """
+        if "third_party_invite" not in event.content:
+            return False
+        if "signed" not in event.content["third_party_invite"]:
+            return False
+        signed = event.content["third_party_invite"]["signed"]
+        for key in {"mxid", "token"}:
+            if key not in signed:
+                return False
+
+        token = signed["token"]
+
+        invite_event = auth_events.get(
+            (EventTypes.ThirdPartyInvite, token,)
+        )
+        if not invite_event:
+            return False
+
+        if event.user_id != invite_event.user_id:
+            return False
+        try:
+            public_key = invite_event.content["public_key"]
+            if signed["mxid"] != event.state_key:
+                return False
+            if signed["token"] != token:
+                return False
+            for server, signature_block in signed["signatures"].items():
+                for key_name, encoded_signature in signature_block.items():
+                    if not key_name.startswith("ed25519:"):
+                        return False
+                    verify_key = decode_verify_key_bytes(
+                        key_name,
+                        decode_base64(public_key)
+                    )
+                    verify_signed_json(signed, server, verify_key)
+
+                    # We got the public key from the invite, so we know that the
+                    # correct server signed the signed bundle.
+                    # The caller is responsible for checking that the signing
+                    # server has not revoked that public key.
+                    return True
+            return False
+        except (KeyError, SignatureVerifyException,):
+            return False
+
+    def _get_power_level_event(self, auth_events):
         key = (EventTypes.PowerLevels, "", )
-        power_level_event = auth_events.get(key)
-        level = None
+        return auth_events.get(key)
+
+    def _get_user_power_level(self, user_id, auth_events):
+        power_level_event = self._get_power_level_event(auth_events)
+
         if power_level_event:
             level = power_level_event.content.get("users", {}).get(user_id)
             if not level:
                 level = power_level_event.content.get("users_default", 0)
+
+            if level is None:
+                return 0
+            else:
+                return int(level)
         else:
             key = (EventTypes.Create, "", )
             create_event = auth_events.get(key)
             if (create_event is not None and
                     create_event.content["creator"] == user_id):
                 return 100
+            else:
+                return 0
 
-        return level
+    def _get_named_level(self, auth_events, name, default):
+        power_level_event = self._get_power_level_event(auth_events)
 
-    def _get_ops_level_from_event_state(self, event, auth_events):
-        key = (EventTypes.PowerLevels, "", )
-        power_level_event = auth_events.get(key)
+        if not power_level_event:
+            return default
 
-        if power_level_event:
-            return (
-                power_level_event.content.get("ban", 50),
-                power_level_event.content.get("kick", 50),
-                power_level_event.content.get("redact", 50),
-            )
-        return None, None, None,
+        level = power_level_event.content.get(name, None)
+        if level is not None:
+            return int(level)
+        else:
+            return default
 
     @defer.inlineCallbacks
-    def get_user_by_req(self, request):
+    def get_user_by_req(self, request, allow_guest=False):
         """ Get a registered user's ID.
 
         Args:
             request - An HTTP request with an access_token query parameter.
         Returns:
-            tuple : of UserID and device string:
-                User ID object of the user making the request
-                Client ID object of the client instance the user is using
+            tuple of:
+                UserID (str)
+                Access token ID (str)
         Raises:
             AuthError if no user by that token exists or the token is invalid.
         """
         # Can optionally look elsewhere in the request (e.g. headers)
         try:
+            user_id = yield self._get_appservice_user_id(request.args)
+            if user_id:
+                request.authenticated_entity = user_id
+                defer.returnValue(
+                    Requester(UserID.from_string(user_id), "", False)
+                )
+
             access_token = request.args["access_token"][0]
-            user_info = yield self.get_user_by_token(access_token)
+            user_info = yield self._get_user_by_access_token(access_token)
             user = user_info["user"]
-            device_id = user_info["device_id"]
             token_id = user_info["token_id"]
+            is_guest = user_info["is_guest"]
 
             ip_addr = self.hs.get_ip_from_request(request)
             user_agent = request.requestHeaders.getRawHeaders(
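The _verify_third_party_invite check above expects the member event's content to carry a "signed" bundle issued by an identity server: it reads exactly the mxid, token and signatures keys and verifies the signature with the public key stored in the earlier m.room.third_party_invite event. A rough sketch of that shape (all identifiers are example values):

# Illustrative event.content for a third-party invite, as checked above.
content = {
    "membership": "invite",
    "third_party_invite": {
        "signed": {
            "mxid": "@alice:example.com",
            "token": "abc123",
            "signatures": {
                "identity.example.com": {
                    "ed25519:0": "<base64-encoded signature>",
                },
            },
        },
    },
}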
@@ -317,54 +530,210 @@ class Auth(object):
|
|||||||
default=[""]
|
default=[""]
|
||||||
)[0]
|
)[0]
|
||||||
if user and access_token and ip_addr:
|
if user and access_token and ip_addr:
|
||||||
yield self.store.insert_client_ip(
|
preserve_context_over_fn(
|
||||||
|
self.store.insert_client_ip,
|
||||||
user=user,
|
user=user,
|
||||||
access_token=access_token,
|
access_token=access_token,
|
||||||
device_id=user_info["device_id"],
|
|
||||||
ip=ip_addr,
|
ip=ip_addr,
|
||||||
user_agent=user_agent
|
user_agent=user_agent
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue((user, ClientInfo(device_id, token_id)))
|
if is_guest and not allow_guest:
|
||||||
|
raise AuthError(
|
||||||
|
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
|
||||||
|
)
|
||||||
|
|
||||||
|
request.authenticated_entity = user.to_string()
|
||||||
|
|
||||||
|
defer.returnValue(Requester(user, token_id, is_guest))
|
||||||
except KeyError:
|
except KeyError:
|
||||||
raise AuthError(403, "Missing access token.")
|
raise AuthError(
|
||||||
|
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
|
||||||
|
errcode=Codes.MISSING_TOKEN
|
||||||
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def get_user_by_token(self, token):
|
def _get_appservice_user_id(self, request_args):
|
||||||
|
app_service = yield self.store.get_app_service_by_token(
|
||||||
|
request_args["access_token"][0]
|
||||||
|
)
|
||||||
|
if app_service is None:
|
||||||
|
defer.returnValue(None)
|
||||||
|
|
||||||
|
if "user_id" not in request_args:
|
||||||
|
defer.returnValue(app_service.sender)
|
||||||
|
|
||||||
|
user_id = request_args["user_id"][0]
|
||||||
|
if app_service.sender == user_id:
|
||||||
|
defer.returnValue(app_service.sender)
|
||||||
|
|
||||||
|
if not app_service.is_interested_in_user(user_id):
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"Application service cannot masquerade as this user."
|
||||||
|
)
|
||||||
|
if not (yield self.store.get_user_by_id(user_id)):
|
||||||
|
raise AuthError(
|
||||||
|
403,
|
||||||
|
"Application service has not registered this user"
|
||||||
|
)
|
||||||
|
defer.returnValue(user_id)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _get_user_by_access_token(self, token):
|
||||||
""" Get a registered user's ID.
|
""" Get a registered user's ID.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
token (str): The access token to get the user by.
|
token (str): The access token to get the user by.
|
||||||
Returns:
|
Returns:
|
||||||
dict : dict that includes the user, device_id, and whether the
|
-                user is a server admin.
+            dict : dict that includes the user and the ID of their access token.
         Raises:
             AuthError if no user by that token exists or the token is invalid.
         """
         try:
-            ret = yield self.store.get_user_by_token(token=token)
-            if not ret:
-                raise StoreError()
-
-            user_info = {
-                "admin": bool(ret.get("admin", False)),
-                "device_id": ret.get("device_id"),
-                "user": UserID.from_string(ret.get("name")),
-                "token_id": ret.get("token_id", None),
-            }
-
-            defer.returnValue(user_info)
-        except StoreError:
-            raise AuthError(403, "Unrecognised access token.",
-                            errcode=Codes.UNKNOWN_TOKEN)
+            ret = yield self.get_user_from_macaroon(token)
+        except AuthError:
+            # TODO(daniel): Remove this fallback when all existing access tokens
+            # have been re-issued as macaroons.
+            ret = yield self._look_up_user_by_access_token(token)
+        defer.returnValue(ret)
+
+    @defer.inlineCallbacks
+    def get_user_from_macaroon(self, macaroon_str):
+        try:
+            macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
+            self.validate_macaroon(macaroon, "access", False)
+
+            user_prefix = "user_id = "
+            user = None
+            guest = False
+            for caveat in macaroon.caveats:
+                if caveat.caveat_id.startswith(user_prefix):
+                    user = UserID.from_string(caveat.caveat_id[len(user_prefix):])
+                elif caveat.caveat_id == "guest = true":
+                    guest = True
+
+            if user is None:
+                raise AuthError(
+                    self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
+                    errcode=Codes.UNKNOWN_TOKEN
+                )
+
+            if guest:
+                ret = {
+                    "user": user,
+                    "is_guest": True,
+                    "token_id": None,
+                }
+            else:
+                # This codepath exists so that we can actually return a
+                # token ID, because we use token IDs in place of device
+                # identifiers throughout the codebase.
+                # TODO(daniel): Remove this fallback when device IDs are
+                # properly implemented.
+                ret = yield self._look_up_user_by_access_token(macaroon_str)
+                if ret["user"] != user:
+                    logger.error(
+                        "Macaroon user (%s) != DB user (%s)",
+                        user,
+                        ret["user"]
+                    )
+                    raise AuthError(
+                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                        "User mismatch in macaroon",
+                        errcode=Codes.UNKNOWN_TOKEN
+                    )
+
+            defer.returnValue(ret)
+        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
+                errcode=Codes.UNKNOWN_TOKEN
+            )
+
+    def validate_macaroon(self, macaroon, type_string, verify_expiry):
+        """
+        validate that a Macaroon is understood by and was signed by this server.
+
+        Args:
+            macaroon(pymacaroons.Macaroon): The macaroon to validate
+            type_string(str): The kind of token this is (e.g. "access", "refresh")
+            verify_expiry(bool): Whether to verify whether the macaroon has expired.
+                This should really always be True, but no clients currently implement
+                token refresh, so we can't enforce expiry yet.
+        """
+        v = pymacaroons.Verifier()
+        v.satisfy_exact("gen = 1")
+        v.satisfy_exact("type = " + type_string)
+        v.satisfy_general(lambda c: c.startswith("user_id = "))
+        v.satisfy_exact("guest = true")
+        if verify_expiry:
+            v.satisfy_general(self._verify_expiry)
+        else:
+            v.satisfy_general(lambda c: c.startswith("time < "))
+
+        v.verify(macaroon, self.hs.config.macaroon_secret_key)
+
+        v = pymacaroons.Verifier()
+        v.satisfy_general(self._verify_recognizes_caveats)
+        v.verify(macaroon, self.hs.config.macaroon_secret_key)
+
+    def _verify_expiry(self, caveat):
+        prefix = "time < "
+        if not caveat.startswith(prefix):
+            return False
+        expiry = int(caveat[len(prefix):])
+        now = self.hs.get_clock().time_msec()
+        return now < expiry
+
+    def _verify_recognizes_caveats(self, caveat):
+        first_space = caveat.find(" ")
+        if first_space < 0:
+            return False
+        second_space = caveat.find(" ", first_space + 1)
+        if second_space < 0:
+            return False
+        return caveat[:second_space + 1] in self._KNOWN_CAVEAT_PREFIXES
+
+    @defer.inlineCallbacks
+    def _look_up_user_by_access_token(self, token):
+        ret = yield self.store.get_user_by_access_token(token)
+        if not ret:
+            logger.warn("Unrecognised access token - not in store: %s" % (token,))
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
+                errcode=Codes.UNKNOWN_TOKEN
+            )
+        user_info = {
+            "user": UserID.from_string(ret.get("name")),
+            "token_id": ret.get("token_id", None),
+            "is_guest": False,
+        }
+        defer.returnValue(user_info)
+
+    @defer.inlineCallbacks
+    def get_appservice_by_req(self, request):
+        try:
+            token = request.args["access_token"][0]
+            service = yield self.store.get_app_service_by_token(token)
+            if not service:
+                logger.warn("Unrecognised appservice access token: %s" % (token,))
+                raise AuthError(
+                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                    "Unrecognised access token.",
+                    errcode=Codes.UNKNOWN_TOKEN
+                )
+            request.authenticated_entity = service.sender
+            defer.returnValue(service)
+        except KeyError:
+            raise AuthError(
+                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
+            )
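The verifier in validate_macaroon() above only accepts macaroons whose caveats follow the "gen = 1" / "type = ..." / "user_id = ..." / optional "guest = true" / "time < ..." grammar. A minimal sketch of minting an access token that would satisfy it, using the same pymacaroons API the code above relies on (the server name, secret key and user ID below are illustrative, not taken from the diff):

    import time

    import pymacaroons

    macaroon = pymacaroons.Macaroon(
        location="example.com",     # hypothetical server_name
        identifier="key",
        key="macaroon_secret_key",  # hs.config.macaroon_secret_key in the code above
    )
    macaroon.add_first_party_caveat("gen = 1")
    macaroon.add_first_party_caveat("type = access")
    macaroon.add_first_party_caveat("user_id = @alice:example.com")
    # One hour from now, in milliseconds, matching _verify_expiry()'s "time < " caveat.
    expiry_ms = int(time.time() * 1000) + 60 * 60 * 1000
    macaroon.add_first_party_caveat("time < %d" % (expiry_ms,))

    token = macaroon.serialize()  # what get_user_from_macaroon() deserializes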
     def is_server_admin(self, user):
         return self.store.is_server_admin(user)

     @defer.inlineCallbacks
     def add_auth_events(self, builder, context):
-        yield run_on_reactor()
-
         auth_ids = self.compute_auth_events(builder, context.current_state)

         auth_events_entries = yield self.store.add_event_hashes(
@@ -373,12 +742,6 @@ class Auth(object):

         builder.auth_events = auth_events_entries

-        context.auth_events = {
-            k: v
-            for k, v in context.current_state.items()
-            if v.event_id in auth_ids
-        }
-
     def compute_auth_events(self, event, current_state):
         if event.type == EventTypes.Create:
             return []
@@ -420,6 +783,16 @@ class Auth(object):
             else:
                 if member_event:
                     auth_ids.append(member_event.event_id)

+                if e_type == Membership.INVITE:
+                    if "third_party_invite" in event.content:
+                        key = (
+                            EventTypes.ThirdPartyInvite,
+                            event.content["third_party_invite"]["signed"]["token"]
+                        )
+                        third_party_invite = current_state.get(key)
+                        if third_party_invite:
+                            auth_ids.append(third_party_invite.event_id)
         elif member_event:
             if member_event.content["membership"] == Membership.JOIN:
                 auth_ids.append(member_event.event_id)
@@ -435,7 +808,7 @@ class Auth(object):
         send_level = send_level_event.content.get("events", {}).get(
             event.type
         )
-        if not send_level:
+        if send_level is None:
             if hasattr(event, "state_key"):
                 send_level = send_level_event.content.get(
                     "state_default", 50
@@ -450,16 +823,7 @@ class Auth(object):
         else:
             send_level = 0

-        user_level = self._get_power_level_from_event_state(
-            event,
-            event.user_id,
-            auth_events,
-        )
-
-        if user_level:
-            user_level = int(user_level)
-        else:
-            user_level = 0
+        user_level = self._get_user_power_level(event.user_id, auth_events)

         if user_level < send_level:
             raise AuthError(
@@ -470,43 +834,54 @@ class Auth(object):

         # Check state_key
         if hasattr(event, "state_key"):
-            if not event.state_key.startswith("_"):
-                if event.state_key.startswith("@"):
-                    if event.state_key != event.user_id:
-                        raise AuthError(
-                            403,
-                            "You are not allowed to set others state"
-                        )
-                    else:
-                        sender_domain = UserID.from_string(
-                            event.user_id
-                        ).domain
-
-                        if sender_domain != event.state_key:
-                            raise AuthError(
-                                403,
-                                "You are not allowed to set others state"
-                            )
+            if event.state_key.startswith("@"):
+                if event.state_key != event.user_id:
+                    raise AuthError(
+                        403,
+                        "You are not allowed to set others state"
+                    )
+                else:
+                    sender_domain = UserID.from_string(
+                        event.user_id
+                    ).domain
+
+                    if sender_domain != event.state_key:
+                        raise AuthError(
+                            403,
+                            "You are not allowed to set others state"
+                        )

         return True

-    def _check_redaction(self, event, auth_events):
-        user_level = self._get_power_level_from_event_state(
-            event,
-            event.user_id,
-            auth_events,
-        )
-
-        _, _, redact_level = self._get_ops_level_from_event_state(
-            event,
-            auth_events,
-        )
-
-        if user_level < redact_level:
-            raise AuthError(
-                403,
-                "You don't have permission to redact events"
-            )
+    def check_redaction(self, event, auth_events):
+        """Check whether the event sender is allowed to redact the target event.
+
+        Returns:
+            True if the sender is allowed to redact the target event if the
+            target event was created by them.
+            False if the sender is allowed to redact the target event with no
+            further checks.
+
+        Raises:
+            AuthError if the event sender is definitely not allowed to redact
+            the target event.
+        """
+        user_level = self._get_user_power_level(event.user_id, auth_events)
+
+        redact_level = self._get_named_level(auth_events, "redact", 50)
+
+        if user_level >= redact_level:
+            return False
+
+        redacter_domain = EventID.from_string(event.event_id).domain
+        redactee_domain = EventID.from_string(event.redacts).domain
+        if redacter_domain == redactee_domain:
+            return True
+
+        raise AuthError(
+            403,
+            "You don't have permission to redact events"
+        )

     def _check_power_levels(self, event, auth_events):
         user_list = event.content.get("users", {})
@@ -528,32 +903,30 @@ class Auth(object):
         if not current_state:
             return

-        user_level = self._get_power_level_from_event_state(
-            event,
-            event.user_id,
-            auth_events,
-        )
+        user_level = self._get_user_power_level(event.user_id, auth_events)

         # Check other levels:
         levels_to_check = [
-            ("users_default", []),
-            ("events_default", []),
-            ("ban", []),
-            ("redact", []),
-            ("kick", []),
+            ("users_default", None),
+            ("events_default", None),
+            ("state_default", None),
+            ("ban", None),
+            ("redact", None),
+            ("kick", None),
+            ("invite", None),
         ]

         old_list = current_state.content.get("users")
         for user in set(old_list.keys() + user_list.keys()):
             levels_to_check.append(
-                (user, ["users"])
+                (user, "users")
             )

         old_list = current_state.content.get("events")
         new_list = event.content.get("events")
         for ev_id in set(old_list.keys() + new_list.keys()):
             levels_to_check.append(
-                (ev_id, ["events"])
+                (ev_id, "events")
             )

         old_state = current_state.content
@@ -561,12 +934,10 @@ class Auth(object):

         for level_to_check, dir in levels_to_check:
             old_loc = old_state
-            for d in dir:
-                old_loc = old_loc.get(d, {})
-
             new_loc = new_state
-            for d in dir:
-                new_loc = new_loc.get(d, {})
+            if dir:
+                old_loc = old_loc.get(dir, {})
+                new_loc = new_loc.get(dir, {})

             if level_to_check in old_loc:
                 old_level = int(old_loc[level_to_check])
@@ -582,6 +953,14 @@ class Auth(object):
             if new_level == old_level:
                 continue

+            if dir == "users" and level_to_check != event.user_id:
+                if old_level == user_level:
+                    raise AuthError(
+                        403,
+                        "You don't have permission to remove ops level equal "
+                        "to your own"
+                    )
+
             if old_level > user_level or new_level > user_level:
                 raise AuthError(
                     403,
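For orientation, _check_power_levels() above walks the content of an m.room.power_levels event. The bare names in levels_to_check (the (name, None) entries) are top-level keys, while the (user, "users") and (ev_id, "events") entries point into the two sub-dicts. A representative content dict (values illustrative, shape per the Matrix spec):

    power_levels_content = {
        "ban": 50,
        "kick": 50,
        "redact": 50,
        "invite": 0,
        "events_default": 0,
        "state_default": 50,
        "users_default": 0,
        "events": {
            "m.room.name": 50,          # per-event-type override
        },
        "users": {
            "@admin:example.com": 100,  # per-user override
        },
    }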
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,16 +27,6 @@ class Membership(object):
     LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)


-class Feedback(object):
-
-    """Represents the types of feedback a user can send in response to a
-    message."""
-
-    DELIVERED = u"delivered"
-    READ = u"read"
-    LIST = (DELIVERED, READ)
-
-
 class PresenceState(object):
     """Represents the presence state of a user."""
     OFFLINE = u"offline"
@@ -59,6 +49,11 @@ class LoginType(object):
     EMAIL_URL = u"m.login.email.url"
     EMAIL_IDENTITY = u"m.login.email.identity"
     RECAPTCHA = u"m.login.recaptcha"
+    DUMMY = u"m.login.dummy"
+
+    # Only for C/S API v1
+    APPLICATION_SERVICE = u"m.login.application_service"
+    SHARED_SECRET = u"org.matrix.login.shared_secret"


 class EventTypes(object):
@@ -68,7 +63,12 @@ class EventTypes(object):
     PowerLevels = "m.room.power_levels"
     Aliases = "m.room.aliases"
     Redaction = "m.room.redaction"
-    Feedback = "m.room.message.feedback"
+    ThirdPartyInvite = "m.room.third_party_invite"
+
+    RoomHistoryVisibility = "m.room.history_visibility"
+    CanonicalAlias = "m.room.canonical_alias"
+    RoomAvatar = "m.room.avatar"
+    GuestAccess = "m.room.guest_access"

     # These are used for validation
     Message = "m.room.message"
@@ -80,3 +80,9 @@ class RejectedReason(object):
     AUTH_ERROR = "auth_error"
     REPLACED = "replaced"
     NOT_ANCESTOR = "not_ancestor"
+
+
+class RoomCreationPreset(object):
+    PRIVATE_CHAT = "private_chat"
+    PUBLIC_CHAT = "public_chat"
+    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,21 +29,27 @@ class Codes(object):
     USER_IN_USE = "M_USER_IN_USE"
     ROOM_IN_USE = "M_ROOM_IN_USE"
     BAD_PAGINATION = "M_BAD_PAGINATION"
+    BAD_STATE = "M_BAD_STATE"
     UNKNOWN = "M_UNKNOWN"
     NOT_FOUND = "M_NOT_FOUND"
+    MISSING_TOKEN = "M_MISSING_TOKEN"
     UNKNOWN_TOKEN = "M_UNKNOWN_TOKEN"
+    GUEST_ACCESS_FORBIDDEN = "M_GUEST_ACCESS_FORBIDDEN"
     LIMIT_EXCEEDED = "M_LIMIT_EXCEEDED"
     CAPTCHA_NEEDED = "M_CAPTCHA_NEEDED"
     CAPTCHA_INVALID = "M_CAPTCHA_INVALID"
-    MISSING_PARAM = "M_MISSING_PARAM",
+    MISSING_PARAM = "M_MISSING_PARAM"
     TOO_LARGE = "M_TOO_LARGE"
+    EXCLUSIVE = "M_EXCLUSIVE"
+    THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
+    THREEPID_IN_USE = "THREEPID_IN_USE"
+    INVALID_USERNAME = "M_INVALID_USERNAME"


 class CodeMessageException(RuntimeError):
     """An exception with integer code and message string attributes."""

     def __init__(self, code, msg):
-        logger.info("%s: %s, %s", type(self).__name__, code, msg)
         super(CodeMessageException, self).__init__("%d: %s" % (code, msg))
         self.code = code
         self.msg = msg
@@ -73,11 +79,6 @@ class SynapseError(CodeMessageException):
         )


-class RoomError(SynapseError):
-    """An error raised when a room event fails."""
-    pass
-
-
 class RegistrationError(SynapseError):
     """An error raised when a registration event fails."""
     pass
@@ -121,6 +122,15 @@ class AuthError(SynapseError):
         super(AuthError, self).__init__(*args, **kwargs)


+class EventSizeError(SynapseError):
+    """An error raised when an event is too big."""
+
+    def __init__(self, *args, **kwargs):
+        if "errcode" not in kwargs:
+            kwargs["errcode"] = Codes.TOO_LARGE
+        super(EventSizeError, self).__init__(413, *args, **kwargs)
+
+
 class EventStreamError(SynapseError):
     """An error raised when there is a problem with the event stream."""
     def __init__(self, *args, **kwargs):
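The Codes values in the diff above are the machine-readable errcode strings defined by the Matrix client-server API; handlers surface them by passing an errcode to SynapseError or one of its subclasses. A hedged sketch of typical usage (the message text is illustrative):

    from synapse.api.errors import Codes, SynapseError

    # Produces a 400 response whose JSON error body carries "M_MISSING_PARAM".
    raise SynapseError(400, "Missing 'user_id' parameter", Codes.MISSING_PARAM)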
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 OpenMarket Ltd
+# Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,6 +15,8 @@
 from synapse.api.errors import SynapseError
 from synapse.types import UserID, RoomID

+import ujson as json
+

 class Filtering(object):

@@ -24,18 +26,18 @@ class Filtering(object):

     def get_user_filter(self, user_localpart, filter_id):
         result = self.store.get_user_filter(user_localpart, filter_id)
-        result.addCallback(Filter)
+        result.addCallback(FilterCollection)
         return result

     def add_user_filter(self, user_localpart, user_filter):
-        self._check_valid_filter(user_filter)
+        self.check_valid_filter(user_filter)
         return self.store.add_user_filter(user_localpart, user_filter)

     # TODO(paul): surely we should probably add a delete_user_filter or
     # replace_user_filter at some point? There's no REST API specified for
     # them however

-    def _check_valid_filter(self, user_filter_json):
+    def check_valid_filter(self, user_filter_json):
         """Check if the provided filter is valid.

         This inspects all definitions contained within the filter.
@@ -50,11 +52,11 @@ class Filtering(object):
         # many definitions.

         top_level_definitions = [
-            "public_user_data", "private_user_data", "server_data"
+            "presence", "account_data"
         ]

         room_level_definitions = [
-            "state", "events", "ephemeral"
+            "state", "timeline", "ephemeral", "account_data"
         ]

         for key in top_level_definitions:
@@ -62,10 +64,29 @@ class Filtering(object):
             self._check_definition(user_filter_json[key])

         if "room" in user_filter_json:
+            self._check_definition_room_lists(user_filter_json["room"])
             for key in room_level_definitions:
                 if key in user_filter_json["room"]:
                     self._check_definition(user_filter_json["room"][key])

+    def _check_definition_room_lists(self, definition):
+        """Check that "rooms" and "not_rooms" are lists of room ids if they
+        are present
+
+        Args:
+            definition(dict): The filter definition
+        Raises:
+            SynapseError: If there was a problem with this definition.
+        """
+        # check rooms are valid room IDs
+        room_id_keys = ["rooms", "not_rooms"]
+        for key in room_id_keys:
+            if key in definition:
+                if type(definition[key]) != list:
+                    raise SynapseError(400, "Expected %s to be a list." % key)
+                for room_id in definition[key]:
+                    RoomID.from_string(room_id)
+
     def _check_definition(self, definition):
         """Check if the provided definition is valid.

@@ -85,14 +106,7 @@ class Filtering(object):
                 400, "Expected JSON object, not %s" % (definition,)
             )

-        # check rooms are valid room IDs
-        room_id_keys = ["rooms", "not_rooms"]
-        for key in room_id_keys:
-            if key in definition:
-                if type(definition[key]) != list:
-                    raise SynapseError(400, "Expected %s to be a list." % key)
-                for room_id in definition[key]:
-                    RoomID.from_string(room_id)
+        self._check_definition_room_lists(definition)

         # check senders are valid user IDs
         user_id_keys = ["senders", "not_senders"]
@@ -114,116 +128,142 @@ class Filtering(object):
         if not isinstance(event_type, basestring):
             raise SynapseError(400, "Event type should be a string")

-        if "format" in definition:
-            event_format = definition["format"]
-            if event_format not in ["federation", "events"]:
-                raise SynapseError(400, "Invalid format: %s" % (event_format,))
-
-        if "select" in definition:
-            event_select_list = definition["select"]
-            for select_key in event_select_list:
-                if select_key not in ["event_id", "origin_server_ts",
-                                      "thread_id", "content", "content.body"]:
-                    raise SynapseError(400, "Bad select: %s" % (select_key,))
-
-        if ("bundle_updates" in definition and
-                type(definition["bundle_updates"]) != bool):
-            raise SynapseError(400, "Bad bundle_updates: expected bool.")
+
+class FilterCollection(object):
+    def __init__(self, filter_json):
+        self._filter_json = filter_json
+
+        room_filter_json = self._filter_json.get("room", {})
+
+        self._room_filter = Filter({
+            k: v for k, v in room_filter_json.items()
+            if k in ("rooms", "not_rooms")
+        })
+
+        self._room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
+        self._room_state_filter = Filter(room_filter_json.get("state", {}))
+        self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
+        self._room_account_data = Filter(room_filter_json.get("account_data", {}))
+        self._presence_filter = Filter(filter_json.get("presence", {}))
+        self._account_data = Filter(filter_json.get("account_data", {}))
+
+        self.include_leave = filter_json.get("room", {}).get(
+            "include_leave", False
+        )
+
+    def __repr__(self):
+        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
+
+    def get_filter_json(self):
+        return self._filter_json
+
+    def timeline_limit(self):
+        return self._room_timeline_filter.limit()
+
+    def presence_limit(self):
+        return self._presence_filter.limit()
+
+    def ephemeral_limit(self):
+        return self._room_ephemeral_filter.limit()
+
+    def filter_presence(self, events):
+        return self._presence_filter.filter(events)
+
+    def filter_account_data(self, events):
+        return self._account_data.filter(events)
+
+    def filter_room_state(self, events):
+        return self._room_state_filter.filter(self._room_filter.filter(events))
+
+    def filter_room_timeline(self, events):
+        return self._room_timeline_filter.filter(self._room_filter.filter(events))
+
+    def filter_room_ephemeral(self, events):
+        return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
+
+    def filter_room_account_data(self, events):
+        return self._room_account_data.filter(self._room_filter.filter(events))


 class Filter(object):
     def __init__(self, filter_json):
         self.filter_json = filter_json

-    def filter_public_user_data(self, events):
-        return self._filter_on_key(events, ["public_user_data"])
-
-    def filter_private_user_data(self, events):
-        return self._filter_on_key(events, ["private_user_data"])
-
-    def filter_room_state(self, events):
-        return self._filter_on_key(events, ["room", "state"])
-
-    def filter_room_events(self, events):
-        return self._filter_on_key(events, ["room", "events"])
-
-    def filter_room_ephemeral(self, events):
-        return self._filter_on_key(events, ["room", "ephemeral"])
-
-    def _filter_on_key(self, events, keys):
-        filter_json = self.filter_json
-        if not filter_json:
-            return events
-
-        try:
-            # extract the right definition from the filter
-            definition = filter_json
-            for key in keys:
-                definition = definition[key]
-            return self._filter_with_definition(events, definition)
-        except KeyError:
-            # return all events if definition isn't specified.
-            return events
-
-    def _filter_with_definition(self, events, definition):
-        return [e for e in events if self._passes_definition(definition, e)]
-
-    def _passes_definition(self, definition, event):
-        """Check if the event passes through the given definition.
-
-        Args:
-            definition(dict): The definition to check against.
-            event(Event): The event to check.
-        Returns:
-            True if the event passes through the filter.
-        """
-        # Algorithm notes:
-        # For each key in the definition, check the event meets the criteria:
-        #   * For types: Literal match or prefix match (if ends with wildcard)
-        #   * For senders/rooms: Literal match only
-        #   * "not_" checks take precedence (e.g. if "m.*" is in both 'types'
-        #     and 'not_types' then it is treated as only being in 'not_types')
-
-        # room checks
-        if hasattr(event, "room_id"):
-            room_id = event.room_id
-            allow_rooms = definition.get("rooms", None)
-            reject_rooms = definition.get("not_rooms", None)
-            if reject_rooms and room_id in reject_rooms:
-                return False
-            if allow_rooms and room_id not in allow_rooms:
-                return False
-
-        # sender checks
-        if hasattr(event, "sender"):
-            # Should we be including event.state_key for some event types?
-            sender = event.sender
-            allow_senders = definition.get("senders", None)
-            reject_senders = definition.get("not_senders", None)
-            if reject_senders and sender in reject_senders:
-                return False
-            if allow_senders and sender not in allow_senders:
-                return False
-
-        # type checks
-        if "not_types" in definition:
-            for def_type in definition["not_types"]:
-                if self._event_matches_type(event, def_type):
-                    return False
-        if "types" in definition:
-            included = False
-            for def_type in definition["types"]:
-                if self._event_matches_type(event, def_type):
-                    included = True
-                    break
-            if not included:
-                return False
-
-        return True
-
-    def _event_matches_type(self, event, def_type):
-        if def_type.endswith("*"):
-            type_prefix = def_type[:-1]
-            return event.type.startswith(type_prefix)
-        else:
-            return event.type == def_type
+    def check(self, event):
+        """Checks whether the filter matches the given event.
+
+        Returns:
+            bool: True if the event matches
+        """
+        sender = event.get("sender", None)
+        if not sender:
+            # Presence events have their 'sender' in content.user_id
+            sender = event.get("content", {}).get("user_id", None)
+
+        return self.check_fields(
+            event.get("room_id", None),
+            sender,
+            event.get("type", None),
+        )
+
+    def check_fields(self, room_id, sender, event_type):
+        """Checks whether the filter matches the given event fields.
+
+        Returns:
+            bool: True if the event fields match
+        """
+        literal_keys = {
+            "rooms": lambda v: room_id == v,
+            "senders": lambda v: sender == v,
+            "types": lambda v: _matches_wildcard(event_type, v)
+        }
+
+        for name, match_func in literal_keys.items():
+            not_name = "not_%s" % (name,)
+            disallowed_values = self.filter_json.get(not_name, [])
+            if any(map(match_func, disallowed_values)):
+                return False
+
+            allowed_values = self.filter_json.get(name, None)
+            if allowed_values is not None:
+                if not any(map(match_func, allowed_values)):
+                    return False
+
+        return True
+
+    def filter_rooms(self, room_ids):
+        """Apply the 'rooms' filter to a given list of rooms.
+
+        Args:
+            room_ids (list): A list of room_ids.
+
+        Returns:
+            list: A list of room_ids that match the filter
+        """
+        room_ids = set(room_ids)
+
+        disallowed_rooms = set(self.filter_json.get("not_rooms", []))
+        room_ids -= disallowed_rooms
+
+        allowed_rooms = self.filter_json.get("rooms", None)
+        if allowed_rooms is not None:
+            room_ids &= set(allowed_rooms)
+
+        return room_ids
+
+    def filter(self, events):
+        return filter(self.check, events)
+
+    def limit(self):
+        return self.filter_json.get("limit", 10)
+
+
+def _matches_wildcard(actual_value, filter_value):
+    if filter_value.endswith("*"):
+        type_prefix = filter_value[:-1]
+        return actual_value.startswith(type_prefix)
+    else:
+        return actual_value == filter_value
+
+
+DEFAULT_FILTER_COLLECTION = FilterCollection({})
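A sketch of how the new FilterCollection consumes a v2 /sync-style filter definition; the filter JSON below is made up for illustration, but the keys it uses ("room", "rooms", "timeline", "types", "not_senders", "limit", "presence") are exactly the ones the code above reads:

    filter_json = {
        "room": {
            "rooms": ["!abcdef:example.com"],
            "timeline": {
                "types": ["m.room.message", "m.room.member"],
                "not_senders": ["@spam:example.com"],
                "limit": 20,
            },
        },
        "presence": {"types": ["m.presence"]},
    }

    f = FilterCollection(filter_json)

    # Events are plain dicts here; Filter.check() reads "room_id", "sender"
    # and "type" (falling back to content.user_id for presence events).
    event = {
        "room_id": "!abcdef:example.com",
        "sender": "@alice:example.com",
        "type": "m.room.message",
    }
    assert f.filter_room_timeline([event]) == [event]
    assert f.timeline_limit() == 20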
@@ -1,4 +1,4 @@
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,11 @@
 CLIENT_PREFIX = "/_matrix/client/api/v1"
 CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
 FEDERATION_PREFIX = "/_matrix/federation/v1"
+STATIC_PREFIX = "/_matrix/static"
 WEB_CLIENT_PREFIX = "/_matrix/client"
 CONTENT_REPO_PREFIX = "/_matrix/content"
 SERVER_KEY_PREFIX = "/_matrix/key/v1"
-MEDIA_PREFIX = "/_matrix/media/v1"
+SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
+MEDIA_PREFIX = "/_matrix/media/r0"
+LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
+APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,3 +12,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+import sys
+sys.dont_write_bytecode = True
+
+from synapse.python_dependencies import (
+    check_requirements, MissingRequirementError
+)  # NOQA
+
+try:
+    check_requirements()
+except MissingRequirementError as e:
+    message = "\n".join([
+        "Missing Requirement: %s" % (e.message,),
+        "To install run:",
+        "    pip install --upgrade --force \"%s\"" % (e.dependency,),
+        "",
+    ])
+    sys.stderr.writelines(message)
+    sys.exit(1)
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,234 +14,395 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from synapse.storage import prepare_database, UpgradeDatabaseException
+import synapse
+
+import contextlib
+import logging
+import os
+import re
+import resource
+import subprocess
+import sys
+import time
+from synapse.config._base import ConfigError
+
+from synapse.python_dependencies import (
+    check_requirements, DEPENDENCY_LINKS
+)
+
+from synapse.rest import ClientRestResource
+from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
+from synapse.storage import are_all_users_on_domain
+from synapse.storage.prepare_database import UpgradeDatabaseException

 from synapse.server import HomeServer

-from synapse.python_dependencies import check_requirements
-
-from twisted.internet import reactor
-from twisted.enterprise import adbapi
-from twisted.web.resource import Resource
+from twisted.conch.manhole import ColoredManhole
+from twisted.conch.insults import insults
+from twisted.conch import manhole_ssh
+from twisted.cred import checkers, portal
+
+from twisted.internet import reactor, task, defer
+from twisted.application import service
+from twisted.web.resource import Resource, EncodingResourceWrapper
 from twisted.web.static import File
-from twisted.web.server import Site
-from synapse.http.server import JsonResource, RootRedirect
+from twisted.web.server import Site, GzipEncoderFactory, Request
+from synapse.http.server import RootRedirect
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.rest.media.v1.media_repository import MediaRepositoryResource
-from synapse.http.server_key_resource import LocalKey
-from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.rest.key.v1.server_key_resource import LocalKey
+from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.api.urls import (
-    CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
-    SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX,
+    FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
+    SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
+    SERVER_KEY_V2_PREFIX,
 )
 from synapse.config.homeserver import HomeServerConfig
 from synapse.crypto import context_factory
 from synapse.util.logcontext import LoggingContext
-from synapse.rest.client.v1 import ClientV1RestResource
-from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
+from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.federation.transport.server import TransportLayerServer
+
+from synapse import events

 from daemonize import Daemonize
-import twisted.manhole.telnet
-
-import synapse
-
-import logging
-import os
-import re
-import sys
-import sqlite3
-import syweb
-
-logger = logging.getLogger(__name__)
+
+logger = logging.getLogger("synapse.app.homeserver")
+
+ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
+
+
+def gz_wrap(r):
+    return EncodingResourceWrapper(r, [GzipEncoderFactory()])
+
+
+def build_resource_for_web_client(hs):
+    webclient_path = hs.get_config().web_client_location
+    if not webclient_path:
+        try:
+            import syweb
+        except ImportError:
+            quit_with_error(
+                "Could not find a webclient.\n\n"
+                "Please either install the matrix-angular-sdk or configure\n"
+                "the location of the source to serve via the configuration\n"
+                "option `web_client_location`\n\n"
+                "To install the `matrix-angular-sdk` via pip, run:\n\n"
+                "    pip install '%(dep)s'\n"
+                "\n"
+                "You can also disable hosting of the webclient via the\n"
+                "configuration option `web_client`\n"
+                % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
+            )
+        syweb_path = os.path.dirname(syweb.__file__)
+        webclient_path = os.path.join(syweb_path, "webclient")
+    # GZip is disabled here due to
+    # https://twistedmatrix.com/trac/ticket/7678
+    # (It can stay enabled for the API resources: they call
+    # write() with the whole body and then finish() straight
+    # after and so do not trigger the bug.)
+    # GzipFile was removed in commit 184ba09
+    # return GzipFile(webclient_path)  # TODO configurable?
+    return File(webclient_path)  # TODO configurable?


 class SynapseHomeServer(HomeServer):
-
-    def build_http_client(self):
-        return MatrixFederationHttpClient(self)
-
-    def build_resource_for_client(self):
-        return ClientV1RestResource(self)
-
-    def build_resource_for_client_v2_alpha(self):
-        return ClientV2AlphaRestResource(self)
-
-    def build_resource_for_federation(self):
-        return JsonResource(self)
-
-    def build_resource_for_web_client(self):
-        syweb_path = os.path.dirname(syweb.__file__)
-        webclient_path = os.path.join(syweb_path, "webclient")
-        return File(webclient_path)  # TODO configurable?
-
-    def build_resource_for_content_repo(self):
-        return ContentRepoResource(
-            self, self.upload_dir, self.auth, self.content_addr
-        )
-
-    def build_resource_for_media_repository(self):
-        return MediaRepositoryResource(self)
-
-    def build_resource_for_server_key(self):
-        return LocalKey(self)
-
-    def build_db_pool(self):
-        return adbapi.ConnectionPool(
-            "sqlite3", self.get_db_name(),
-            check_same_thread=False,
-            cp_min=1,
-            cp_max=1
-        )
-
-    def create_resource_tree(self, web_client, redirect_root_to_web_client):
-        """Create the resource tree for this Home Server.
-
-        This is unduly complicated because Twisted does not support putting
-        child resources more than 1 level deep at a time.
-
-        Args:
-            web_client (bool): True to enable the web client.
-            redirect_root_to_web_client (bool): True to redirect '/' to the
-            location of the web client. This does nothing if web_client is not
-            True.
-        """
-        # list containing (path_str, Resource) e.g:
-        # [ ("/aaa/bbb/cc", Resource1), ("/aaa/dummy", Resource2) ]
-        desired_tree = [
-            (CLIENT_PREFIX, self.get_resource_for_client()),
-            (CLIENT_V2_ALPHA_PREFIX, self.get_resource_for_client_v2_alpha()),
-            (FEDERATION_PREFIX, self.get_resource_for_federation()),
-            (CONTENT_REPO_PREFIX, self.get_resource_for_content_repo()),
-            (SERVER_KEY_PREFIX, self.get_resource_for_server_key()),
-            (MEDIA_PREFIX, self.get_resource_for_media_repository()),
-        ]
-        if web_client:
-            logger.info("Adding the web client.")
-            desired_tree.append((WEB_CLIENT_PREFIX,
-                                 self.get_resource_for_web_client()))
-
-        if web_client and redirect_root_to_web_client:
-            self.root_resource = RootRedirect(WEB_CLIENT_PREFIX)
-        else:
-            self.root_resource = Resource()
-
-        # ideally we'd just use getChild and putChild but getChild doesn't work
-        # unless you give it a Request object IN ADDITION to the name :/ So
-        # instead, we'll store a copy of this mapping so we can actually add
-        # extra resources to existing nodes. See self._resource_id for the key.
-        resource_mappings = {}
-        for (full_path, resource) in desired_tree:
-            logger.info("Attaching %s to path %s", resource, full_path)
-            last_resource = self.root_resource
-            for path_seg in full_path.split('/')[1:-1]:
-                if path_seg not in last_resource.listNames():
-                    # resource doesn't exist, so make a "dummy resource"
-                    child_resource = Resource()
-                    last_resource.putChild(path_seg, child_resource)
-                    res_id = self._resource_id(last_resource, path_seg)
-                    resource_mappings[res_id] = child_resource
-                    last_resource = child_resource
-                else:
-                    # we have an existing Resource, use that instead.
-                    res_id = self._resource_id(last_resource, path_seg)
-                    last_resource = resource_mappings[res_id]
-
-            # ===========================
-            # now attach the actual desired resource
-            last_path_seg = full_path.split('/')[-1]
-
-            # if there is already a resource here, thieve its children and
-            # replace it
-            res_id = self._resource_id(last_resource, last_path_seg)
-            if res_id in resource_mappings:
-                # there is a dummy resource at this path already, which needs
-                # to be replaced with the desired resource.
-                existing_dummy_resource = resource_mappings[res_id]
-                for child_name in existing_dummy_resource.listNames():
-                    child_res_id = self._resource_id(existing_dummy_resource,
-                                                     child_name)
-                    child_resource = resource_mappings[child_res_id]
-                    # steal the children
-                    resource.putChild(child_name, child_resource)
-
-            # finally, insert the desired resource in the right place
-            last_resource.putChild(last_path_seg, resource)
-            res_id = self._resource_id(last_resource, last_path_seg)
-            resource_mappings[res_id] = resource
-
-        return self.root_resource
-
-    def _resource_id(self, resource, path_seg):
-        """Construct an arbitrary resource ID so you can retrieve the mapping
-        later.
-
-        If you want to represent resource A putChild resource B with path C,
-        the mapping should look like _resource_id(A,C) = B.
-
-        Args:
-            resource (Resource): The *parent* Resource
-            path_seg (str): The name of the child Resource to be attached.
-        Returns:
-            str: A unique string which can be a key to the child Resource.
-        """
-        return "%s-%s" % (resource, path_seg)
-
-    def start_listening(self, secure_port, unsecure_port):
-        if secure_port is not None:
-            reactor.listenSSL(
-                secure_port, Site(self.root_resource), self.tls_context_factory
-            )
-            logger.info("Synapse now listening on port %d", secure_port)
-        if unsecure_port is not None:
-            reactor.listenTCP(
-                unsecure_port, Site(self.root_resource)
-            )
-            logger.info("Synapse now listening on port %d", unsecure_port)
+    def _listener_http(self, config, listener_config):
+        port = listener_config["port"]
+        bind_address = listener_config.get("bind_address", "")
+        tls = listener_config.get("tls", False)
+        site_tag = listener_config.get("tag", port)
+
+        if tls and config.no_tls:
+            return
+
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "client":
+                    client_resource = ClientRestResource(self)
+                    if res["compress"]:
+                        client_resource = gz_wrap(client_resource)
+
+                    resources.update({
+                        "/_matrix/client/api/v1": client_resource,
+                        "/_matrix/client/r0": client_resource,
+                        "/_matrix/client/unstable": client_resource,
+                        "/_matrix/client/v2_alpha": client_resource,
+                        "/_matrix/client/versions": client_resource,
+                    })
+
+                if name == "federation":
+                    resources.update({
+                        FEDERATION_PREFIX: TransportLayerServer(self),
+                    })
+
+                if name in ["static", "client"]:
+                    resources.update({
+                        STATIC_PREFIX: File(
+                            os.path.join(os.path.dirname(synapse.__file__), "static")
+                        ),
+                    })
+
+                if name in ["media", "federation", "client"]:
+                    media_repo = MediaRepositoryResource(self)
+                    resources.update({
+                        MEDIA_PREFIX: media_repo,
+                        LEGACY_MEDIA_PREFIX: media_repo,
+                        CONTENT_REPO_PREFIX: ContentRepoResource(
+                            self, self.config.uploads_path, self.auth, self.content_addr
+                        ),
+                    })
+
+                if name in ["keys", "federation"]:
+                    resources.update({
+                        SERVER_KEY_PREFIX: LocalKey(self),
+                        SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
+                    })
+
+                if name == "webclient":
+                    resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
+
+                if name == "metrics" and self.get_config().enable_metrics:
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+
+        root_resource = create_resource_tree(resources)
+        if tls:
+            reactor.listenSSL(
+                port,
+                SynapseSite(
+                    "synapse.access.https.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                ),
+                self.tls_server_context_factory,
+                interface=bind_address
+            )
+        else:
+            reactor.listenTCP(
+                port,
+                SynapseSite(
+                    "synapse.access.http.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                ),
+                interface=bind_address
+            )
+        logger.info("Synapse now listening on port %d", port)
+
+    def start_listening(self):
+        config = self.get_config()
+
+        for listener in config.listeners:
+            if listener["type"] == "http":
+                self._listener_http(config, listener)
+            elif listener["type"] == "manhole":
+                checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
+                    matrix="rabbithole"
+                )
+
+                rlm = manhole_ssh.TerminalRealm()
+                rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
+                    ColoredManhole,
+                    {
+                        "__name__": "__console__",
+                        "hs": self,
+                    }
+                )
+
+                f = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))
+
+                reactor.listenTCP(
+                    listener["port"],
+                    f,
+                    interface=listener.get("bind_address", '127.0.0.1')
+                )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+    def run_startup_checks(self, db_conn, database_engine):
+        all_users_native = are_all_users_on_domain(
+            db_conn.cursor(), database_engine, self.hostname
+        )
+        if not all_users_native:
+            quit_with_error(
+                "Found users in database not native to %s!\n"
+                "You cannot change a synapse server_name after it's been configured"
+                % (self.hostname,)
+            )
+
+        try:
+            database_engine.check_database(db_conn.cursor())
+        except IncorrectDatabaseSetup as e:
+            quit_with_error(e.message)
+
+    def get_db_conn(self):
+        # Any param beginning with cp_ is a parameter for adbapi, and should
+        # not be passed to the database engine.
+        db_params = {
+            k: v for k, v in self.db_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+        db_conn = self.database_engine.module.connect(**db_params)
+
+        self.database_engine.on_new_connection(db_conn)
+        return db_conn


-def setup():
-    config = HomeServerConfig.load_config(
-        "Synapse Homeserver",
-        sys.argv[1:],
-        generate_section="Homeserver"
-    )
+def quit_with_error(error_string):
+    message_lines = error_string.split("\n")
+    line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2
+    sys.stderr.write("*" * line_length + '\n')
+    for line in message_lines:
+        sys.stderr.write(" %s\n" % (line.rstrip(),))
+    sys.stderr.write("*" * line_length + '\n')
+    sys.exit(1)
+
+
+def get_version_string():
+    try:
+        null = open(os.devnull, 'w')
+        cwd = os.path.dirname(os.path.abspath(__file__))
+        try:
+            git_branch = subprocess.check_output(
+                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
+                stderr=null,
+                cwd=cwd,
+            ).strip()
+            git_branch = "b=" + git_branch
+        except subprocess.CalledProcessError:
+            git_branch = ""
+
+        try:
+            git_tag = subprocess.check_output(
+                ['git', 'describe', '--exact-match'],
+                stderr=null,
+                cwd=cwd,
+            ).strip()
+            git_tag = "t=" + git_tag
+        except subprocess.CalledProcessError:
+            git_tag = ""
+
+        try:
+            git_commit = subprocess.check_output(
+                ['git', 'rev-parse', '--short', 'HEAD'],
+                stderr=null,
+                cwd=cwd,
+            ).strip()
+        except subprocess.CalledProcessError:
+            git_commit = ""
+
+        try:
+            dirty_string = "-this_is_a_dirty_checkout"
+            is_dirty = subprocess.check_output(
+                ['git', 'describe', '--dirty=' + dirty_string],
+                stderr=null,
+                cwd=cwd,
+            ).strip().endswith(dirty_string)
+
+            git_dirty = "dirty" if is_dirty else ""
+        except subprocess.CalledProcessError:
+            git_dirty = ""
+
+        if git_branch or git_tag or git_commit or git_dirty:
+            git_version = ",".join(
+                s for s in
+                (git_branch, git_tag, git_commit, git_dirty,)
+                if s
+            )
+
+            return (
+                "Synapse/%s (%s)" % (
+                    synapse.__version__, git_version,
+                )
+            ).encode("ascii")
+    except Exception as e:
+        logger.info("Failed to check for git repository: %s", e)
+
+    return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")
+
+
+def change_resource_limit(soft_file_no):
+    try:
+        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+        if not soft_file_no:
+            soft_file_no = hard
+
+        resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
+        logger.info("Set file limit to: %d", soft_file_no)
+
+        resource.setrlimit(
+            resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
+        )
+    except (ValueError, resource.error) as e:
+        logger.warn("Failed to set file or core limit: %s", e)
+
+
+def setup(config_options):
+    """
+    Args:
+        config_options: The options passed to Synapse. Usually
+            `sys.argv[1:]`.
+
+    Returns:
+        HomeServer
+    """
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse Homeserver",
+            config_options,
+            generate_section="Homeserver"
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    if not config:
+        # If a config isn't returned, and an exception isn't raised, we're just
+        # generating config files and shouldn't try to continue.
+        sys.exit(0)

     config.setup_logging()

-    check_requirements()
+    # check any extra requirements we have now we have a config
+    check_requirements(config)
+
+    version_string = get_version_string()

     logger.info("Server hostname: %s", config.server_name)
-    logger.info("Server version: %s", synapse.__version__)
+    logger.info("Server version: %s", version_string)

-    if re.search(":[0-9]+$", config.server_name):
-        domain_with_port = config.server_name
-    else:
-        domain_with_port = "%s:%s" % (config.server_name, config.bind_port)
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    tls_context_factory = context_factory.ServerContextFactory(config)
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    database_engine = create_engine(config.database_config["name"])
+    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

     hs = SynapseHomeServer(
         config.server_name,
-        domain_with_port=domain_with_port,
-        upload_dir=os.path.abspath("uploads"),
-        db_name=config.database_path,
-        tls_context_factory=tls_context_factory,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
         config=config,
         content_addr=config.content_addr,
+        version_string=version_string,
+        database_engine=database_engine,
     )

-    hs.create_resource_tree(
-        web_client=config.webclient,
-        redirect_root_to_web_client=True,
-    )
-
-    db_name = hs.get_db_name()
-
-    logger.info("Preparing database: %s...", db_name)
+    logger.info("Preparing database: %s...", config.database_config['name'])

     try:
-        with sqlite3.connect(db_name) as db_conn:
-            prepare_database(db_conn)
+        db_conn = hs.get_db_conn()
+        database_engine.prepare_database(db_conn)
+        hs.run_startup_checks(db_conn, database_engine)
+
+        db_conn.commit()
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
@@ -250,39 +411,319 @@ def setup():
|
|||||||
)
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
logger.info("Database prepared in %s.", db_name)
|
logger.info("Database prepared in %s.", config.database_config['name'])
|
||||||
|
|
||||||
db_pool = hs.get_db_pool()
|
hs.setup()
|
||||||
|
hs.start_listening()
|
||||||
|
|
||||||
if db_name == ":memory:":
|
def start():
|
||||||
# Memory databases will need to be setup each time they are opened.
|
hs.get_pusherpool().start()
|
||||||
reactor.callWhenRunning(
|
hs.get_state_handler().start_caching()
|
||||||
db_pool.runWithConnection, prepare_database
|
hs.get_datastore().start_profiling()
|
||||||
|
hs.get_datastore().start_doing_background_updates()
|
||||||
|
hs.get_replication_layer().start_get_pdu_cache()
|
||||||
|
|
||||||
|
reactor.callWhenRunning(start)
|
||||||
|
|
||||||
|
return hs
|
||||||
|
|
||||||
|
|
||||||
|
class SynapseService(service.Service):
    """A twisted Service class that will start synapse. Used to run synapse
    via twistd and a .tac.
    """
    def __init__(self, config):
        self.config = config

    def startService(self):
        hs = setup(self.config)
        change_resource_limit(hs.config.soft_file_limit)

    def stopService(self):
        return self._port.stopListening()

Removed from the tail of the old setup() in the same hunk:

-    if config.manhole:
-        f = twisted.manhole.telnet.ShellFactory()
-        f.username = "matrix"
-        f.password = "rabbithole"
-        f.namespace['hs'] = hs
-        reactor.listenTCP(config.manhole, f, interface='127.0.0.1')
-
-    bind_port = config.bind_port
-    if config.no_tls:
-        bind_port = None
-    hs.start_listening(bind_port, config.unsecure_port)
-
-    hs.get_pusherpool().start()
-    hs.get_state_handler().start_caching()
-    hs.get_datastore().start_profiling()


class SynapseRequest(Request):
    def __init__(self, site, *args, **kw):
        Request.__init__(self, *args, **kw)
        self.site = site
        self.authenticated_entity = None
        self.start_time = 0

    def __repr__(self):
        # We overwrite this so that we don't log ``access_token``
        return '<%s at 0x%x method=%s uri=%s clientproto=%s site=%s>' % (
            self.__class__.__name__,
            id(self),
            self.method,
            self.get_redacted_uri(),
            self.clientproto,
            self.site.site_tag,
        )

    def get_redacted_uri(self):
        return ACCESS_TOKEN_RE.sub(
            r'\1<redacted>\3',
            self.uri
        )

    def get_user_agent(self):
        return self.requestHeaders.getRawHeaders("User-Agent", [None])[-1]

    def started_processing(self):
        self.site.access_logger.info(
            "%s - %s - Received request: %s %s",
            self.getClientIP(),
            self.site.site_tag,
            self.method,
            self.get_redacted_uri()
        )
        self.start_time = int(time.time() * 1000)

    def finished_processing(self):
        try:
            context = LoggingContext.current_context()
            ru_utime, ru_stime = context.get_resource_usage()
            db_txn_count = context.db_txn_count
            db_txn_duration = context.db_txn_duration
        except:
            ru_utime, ru_stime = (0, 0)
            db_txn_count, db_txn_duration = (0, 0)

        self.site.access_logger.info(
            "%s - %s - {%s}"
            " Processed request: %dms (%dms, %dms) (%dms/%d)"
            " %sB %s \"%s %s %s\" \"%s\"",
            self.getClientIP(),
            self.site.site_tag,
            self.authenticated_entity,
            int(time.time() * 1000) - self.start_time,
            int(ru_utime * 1000),
            int(ru_stime * 1000),
            int(db_txn_duration * 1000),
            int(db_txn_count),
            self.sentLength,
            self.code,
            self.method,
            self.get_redacted_uri(),
            self.clientproto,
            self.get_user_agent(),
        )

    @contextlib.contextmanager
    def processing(self):
        self.started_processing()
        yield
        self.finished_processing()


class XForwardedForRequest(SynapseRequest):
    """
    Add a layer on top of another request that only uses the value of an
    X-Forwarded-For header as the result of C{getClientIP}.
    """
    def __init__(self, *args, **kw):
        SynapseRequest.__init__(self, *args, **kw)

    def getClientIP(self):
        """
        @return: The client address (the first address) in the value of the
            I{X-Forwarded-For header}. If the header is not present, return
            C{b"-"}.
        """
        return self.requestHeaders.getRawHeaders(
            b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()


class SynapseRequestFactory(object):
    def __init__(self, site, x_forwarded_for):
        self.site = site
        self.x_forwarded_for = x_forwarded_for

    def __call__(self, *args, **kwargs):
        if self.x_forwarded_for:
            return XForwardedForRequest(self.site, *args, **kwargs)
        else:
            return SynapseRequest(self.site, *args, **kwargs)


class SynapseSite(Site):
    """
    Subclass of a twisted http Site that does access logging with python's
    standard logging
    """
    def __init__(self, logger_name, site_tag, config, resource, *args, **kwargs):
        Site.__init__(self, resource, *args, **kwargs)

        self.site_tag = site_tag

        proxied = config.get("x_forwarded", False)
        self.requestFactory = SynapseRequestFactory(self, proxied)
        self.access_logger = logging.getLogger(logger_name)

    def log(self, request):
        pass
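
ACCESS_TOKEN_RE itself is defined elsewhere in homeserver.py and is not part of this hunk. A minimal sketch of the redaction get_redacted_uri() performs, with an assumed pattern shaped to fit the r'\1<redacted>\3' substitution (two groups before the token value, one after):

    import re

    # Assumed pattern, not copied from the diff.
    ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')

    uri = "/_matrix/client/api/v1/sync?access_token=SECRET&timeout=30000"
    print ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri)
    # /_matrix/client/api/v1/sync?access_token=<redacted>&timeout=30000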

def create_resource_tree(desired_tree, redirect_root_to_web_client=True):
    """Create the resource tree for this Home Server.

    This is unduly complicated because Twisted does not support putting
    child resources more than 1 level deep at a time.

    Args:
        desired_tree (dict): A mapping from desired path (str) to the
            Resource to attach at that path.
        redirect_root_to_web_client (bool): True to redirect '/' to the
            location of the web client. This does nothing if the web client
            is not in desired_tree.
    """
    if redirect_root_to_web_client and WEB_CLIENT_PREFIX in desired_tree:
        root_resource = RootRedirect(WEB_CLIENT_PREFIX)
    else:
        root_resource = Resource()

    # ideally we'd just use getChild and putChild but getChild doesn't work
    # unless you give it a Request object IN ADDITION to the name :/ So
    # instead, we'll store a copy of this mapping so we can actually add
    # extra resources to existing nodes. See self._resource_id for the key.
    resource_mappings = {}
    for full_path, res in desired_tree.items():
        logger.info("Attaching %s to path %s", res, full_path)
        last_resource = root_resource
        for path_seg in full_path.split('/')[1:-1]:
            if path_seg not in last_resource.listNames():
                # resource doesn't exist, so make a "dummy resource"
                child_resource = Resource()
                last_resource.putChild(path_seg, child_resource)
                res_id = _resource_id(last_resource, path_seg)
                resource_mappings[res_id] = child_resource
                last_resource = child_resource
            else:
                # we have an existing Resource, use that instead.
                res_id = _resource_id(last_resource, path_seg)
                last_resource = resource_mappings[res_id]

        # ===========================
        # now attach the actual desired resource
        last_path_seg = full_path.split('/')[-1]

        # if there is already a resource here, thieve its children and
        # replace it
        res_id = _resource_id(last_resource, last_path_seg)
        if res_id in resource_mappings:
            # there is a dummy resource at this path already, which needs
            # to be replaced with the desired resource.
            existing_dummy_resource = resource_mappings[res_id]
            for child_name in existing_dummy_resource.listNames():
                child_res_id = _resource_id(
                    existing_dummy_resource, child_name
                )
                child_resource = resource_mappings[child_res_id]
                # steal the children
                res.putChild(child_name, child_resource)

        # finally, insert the desired resource in the right place
        last_resource.putChild(last_path_seg, res)
        res_id = _resource_id(last_resource, last_path_seg)
        resource_mappings[res_id] = res

    return root_resource


def _resource_id(resource, path_seg):
    """Construct an arbitrary resource ID so you can retrieve the mapping
    later.

    If you want to represent resource A putChild resource B with path C,
    the mapping should look like _resource_id(A,C) = B.

    Args:
        resource (Resource): The *parent* Resource
        path_seg (str): The name of the child Resource to be attached.
    Returns:
        str: A unique string which can be a key to the child Resource.
    """
    return "%s-%s" % (resource, path_seg)
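
A minimal sketch of the desired_tree mapping create_resource_tree() consumes (paths and resources invented for illustration); the helper creates the intermediate "_matrix" dummy Resource nodes itself:

    from twisted.web.resource import Resource

    client_res = Resource()      # would be the client API resource
    federation_res = Resource()  # would be the federation resource

    root = create_resource_tree(
        {
            "/_matrix/client/api/v1": client_res,
            "/_matrix/federation/v1": federation_res,
        },
        redirect_root_to_web_client=False,
    )
    # root now serves both paths, sharing the dummy "_matrix" node.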

def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    start_time = hs.get_clock().time()

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        stats = {}
        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        daily_messages = yield hs.get_datastore().count_daily_messages()
        if daily_messages is not None:
            stats["daily_messages"] = daily_messages

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push",
                stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    if hs.config.report_stats:
        phone_home_task = task.LoopingCall(phone_stats_home)
        logger.info("Scheduling stats reporting for 24 hour intervals")
        phone_home_task.start(60 * 60 * 24, now=False)

    def in_thread():
        # Uncomment to enable tracing of log context changes.
        # sys.settrace(logcontext_tracer)
        with LoggingContext("run"):
            change_resource_limit(hs.config.soft_file_limit)
            reactor.run()

    if hs.config.daemonize:

        if hs.config.print_pidfile:
            print hs.config.pid_file

-    if config.daemonize:
-        print config.pid_file
        daemon = Daemonize(
            app="synapse-homeserver",
-            pid=config.pid_file,
            pid=hs.config.pid_file,
-            action=run,
            action=lambda: in_thread(),
            auto_close_fds=False,
            verbose=True,
            logger=logger,
@@ -290,18 +731,15 @@ def setup():

        daemon.start()
    else:
-        reactor.run()
        in_thread()

-
-def run():
-    with LoggingContext("run"):
-        reactor.run()


def main():
    with LoggingContext("main"):
        # check base requirements
        check_requirements()
-        setup()
        hs = setup(sys.argv[1:])
        run(hs)


if __name__ == '__main__':

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,53 +16,67 @@

import sys
import os
import os.path
import subprocess
import signal
import yaml

-SYNAPSE = ["python", "-m", "synapse.app.homeserver"]
SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]

-CONFIGFILE = "homeserver.yaml"
-PIDFILE = "homeserver.pid"

GREEN = "\x1b[1;32m"
RED = "\x1b[1;31m"
NORMAL = "\x1b[m"


-def start():
-    if not os.path.exists(CONFIGFILE):
-        sys.stderr.write(
-            "No config file found\n"
-            "To generate a config file, run '%s -c %s --generate-config"
-            " --server-name=<server name>'\n" % (
-                " ".join(SYNAPSE), CONFIGFILE
-            )
-        )
-        sys.exit(1)
def start(configfile):
    print "Starting ...",
    args = SYNAPSE
-    args.extend(["--daemonize", "-c", CONFIGFILE, "--pid-file", PIDFILE])
    args.extend(["--daemonize", "-c", configfile])
-    subprocess.check_call(args)
-    print GREEN + "started" + NORMAL
    try:
        subprocess.check_call(args)
        print GREEN + "started" + NORMAL
    except subprocess.CalledProcessError as e:
        print (
            RED +
            "error starting (exit code: %d); see above for logs" % e.returncode +
            NORMAL
        )


-def stop():
-    if os.path.exists(PIDFILE):
-        pid = int(open(PIDFILE).read())
def stop(pidfile):
    if os.path.exists(pidfile):
        pid = int(open(pidfile).read())
        os.kill(pid, signal.SIGTERM)
        print GREEN + "stopped" + NORMAL


def main():
    configfile = sys.argv[2] if len(sys.argv) == 3 else "homeserver.yaml"

    if not os.path.exists(configfile):
        sys.stderr.write(
            "No config file found\n"
            "To generate a config file, run '%s -c %s --generate-config"
            " --server-name=<server name>'\n" % (
                " ".join(SYNAPSE), configfile
            )
        )
        sys.exit(1)

    config = yaml.load(open(configfile))
    pidfile = config["pid_file"]

    action = sys.argv[1] if sys.argv[1:] else "usage"
    if action == "start":
-        start()
        start(configfile)
    elif action == "stop":
-        stop()
        stop(pidfile)
    elif action == "restart":
-        stop()
-        start()
        stop(pidfile)
        start(configfile)
    else:
-        sys.stderr.write("Usage: %s [start|stop|restart]\n" % (sys.argv[0],))
        sys.stderr.write("Usage: %s [start|stop|restart] [configfile]\n" % (sys.argv[0],))
        sys.exit(1)

synapse/appservice/__init__.py  (new file, 226 lines)
@@ -0,0 +1,226 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes

import logging
import re

logger = logging.getLogger(__name__)


class ApplicationServiceState(object):
    DOWN = "down"
    UP = "up"


class AppServiceTransaction(object):
    """Represents an application service transaction."""

    def __init__(self, service, id, events):
        self.service = service
        self.id = id
        self.events = events

    def send(self, as_api):
        """Sends this transaction using the provided AS API interface.

        Args:
            as_api(ApplicationServiceApi): The API to use to send.
        Returns:
            A Deferred which resolves to True if the transaction was sent.
        """
        return as_api.push_bulk(
            service=self.service,
            events=self.events,
            txn_id=self.id
        )

    def complete(self, store):
        """Completes this transaction as successful.

        Marks this transaction ID on the application service and removes the
        transaction contents from the database.

        Args:
            store: The database store to operate on.
        Returns:
            A Deferred which resolves to True if the transaction was completed.
        """
        return store.complete_appservice_txn(
            service=self.service,
            txn_id=self.id
        )


class ApplicationService(object):
    """Defines an application service. This definition is mostly what is
    provided to the /register AS API.

    Provides methods to check if this service is "interested" in events.
    """
    NS_USERS = "users"
    NS_ALIASES = "aliases"
    NS_ROOMS = "rooms"
    # The ordering here is important as it is used to map database values (which
    # are stored as ints representing the position in this list) to namespace
    # values.
    NS_LIST = [NS_USERS, NS_ALIASES, NS_ROOMS]

    def __init__(self, token, url=None, namespaces=None, hs_token=None,
                 sender=None, id=None):
        self.token = token
        self.url = url
        self.hs_token = hs_token
        self.sender = sender
        self.namespaces = self._check_namespaces(namespaces)
        self.id = id

    def _check_namespaces(self, namespaces):
        # Sanity check that it is of the form:
        # {
        #   users: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        #   aliases: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        #   rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        # }
        if not namespaces:
            namespaces = {}

        for ns in ApplicationService.NS_LIST:
            if ns not in namespaces:
                namespaces[ns] = []
                continue

            if type(namespaces[ns]) != list:
                raise ValueError("Bad namespace value for '%s'" % ns)
            for regex_obj in namespaces[ns]:
                if not isinstance(regex_obj, dict):
                    raise ValueError("Expected dict regex for ns '%s'" % ns)
                if not isinstance(regex_obj.get("exclusive"), bool):
                    raise ValueError(
                        "Expected bool for 'exclusive' in ns '%s'" % ns
                    )
                if not isinstance(regex_obj.get("regex"), basestring):
                    raise ValueError(
                        "Expected string for 'regex' in ns '%s'" % ns
                    )
        return namespaces

    def _matches_regex(self, test_string, namespace_key, return_obj=False):
        if not isinstance(test_string, basestring):
            logger.error(
                "Expected a string to test regex against, but got %s",
                test_string
            )
            return False

        for regex_obj in self.namespaces[namespace_key]:
            if re.match(regex_obj["regex"], test_string):
                if return_obj:
                    return regex_obj
                return True
        return False

    def _is_exclusive(self, ns_key, test_string):
        regex_obj = self._matches_regex(test_string, ns_key, return_obj=True)
        if regex_obj:
            return regex_obj["exclusive"]
        return False

    def _matches_user(self, event, member_list):
        if (hasattr(event, "sender") and
                self.is_interested_in_user(event.sender)):
            return True
        # also check m.room.member state key
        if (hasattr(event, "type") and event.type == EventTypes.Member
                and hasattr(event, "state_key")
                and self.is_interested_in_user(event.state_key)):
            return True
        # check joined member events
        for user_id in member_list:
            if self.is_interested_in_user(user_id):
                return True
        return False

    def _matches_room_id(self, event):
        if hasattr(event, "room_id"):
            return self.is_interested_in_room(event.room_id)
        return False

    def _matches_aliases(self, event, alias_list):
        for alias in alias_list:
            if self.is_interested_in_alias(alias):
                return True
        return False

    def is_interested(self, event, restrict_to=None, aliases_for_event=None,
                      member_list=None):
        """Check if this service is interested in this event.

        Args:
            event(Event): The event to check.
            restrict_to(str): The namespace to restrict regex tests to.
            aliases_for_event(list): A list of all the known room aliases for
                this event.
            member_list(list): A list of all joined user_ids in this room.
        Returns:
            bool: True if this service would like to know about this event.
        """
        if aliases_for_event is None:
            aliases_for_event = []
        if member_list is None:
            member_list = []

        if restrict_to and restrict_to not in ApplicationService.NS_LIST:
            # this is a programming error, so fail early and raise a general
            # exception
            raise Exception("Unexpected restrict_to value: %s" % restrict_to)

        if not restrict_to:
            return (self._matches_user(event, member_list)
                    or self._matches_aliases(event, aliases_for_event)
                    or self._matches_room_id(event))
        elif restrict_to == ApplicationService.NS_ALIASES:
            return self._matches_aliases(event, aliases_for_event)
        elif restrict_to == ApplicationService.NS_ROOMS:
            return self._matches_room_id(event)
        elif restrict_to == ApplicationService.NS_USERS:
            return self._matches_user(event, member_list)

    def is_interested_in_user(self, user_id):
        return (
            self._matches_regex(user_id, ApplicationService.NS_USERS)
            or user_id == self.sender
        )

    def is_interested_in_alias(self, alias):
        return self._matches_regex(alias, ApplicationService.NS_ALIASES)

    def is_interested_in_room(self, room_id):
        return self._matches_regex(room_id, ApplicationService.NS_ROOMS)

    def is_exclusive_user(self, user_id):
        return (
            self._is_exclusive(ApplicationService.NS_USERS, user_id)
            or user_id == self.sender
        )

    def is_exclusive_alias(self, alias):
        return self._is_exclusive(ApplicationService.NS_ALIASES, alias)

    def is_exclusive_room(self, room_id):
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def __str__(self):
        return "ApplicationService: %s" % (self.__dict__,)
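
To make the namespace checks concrete, a small example (token, URL and regex invented) of an AS that exclusively bridges @irc_-prefixed users:

    service = ApplicationService(
        token="some_as_token",
        url="https://bridge.example.com",
        namespaces={
            "users": [{"regex": "@irc_.*", "exclusive": True}],
        },
    )

    service.is_interested_in_user("@irc_alice:example.com")  # True
    service.is_exclusive_user("@irc_alice:example.com")      # True
    service.is_interested_in_user("@bob:example.com")        # False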

synapse/appservice/api.py  (new file, 112 lines)
@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer

from synapse.api.errors import CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.events.utils import serialize_event

import logging
import urllib

logger = logging.getLogger(__name__)


class ApplicationServiceApi(SimpleHttpClient):
    """This class manages HS -> AS communications, including querying and
    pushing.
    """

    def __init__(self, hs):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

    @defer.inlineCallbacks
    def query_user(self, service, user_id):
        uri = service.url + ("/users/%s" % urllib.quote(user_id))
        response = None
        try:
            response = yield self.get_json(uri, {
                "access_token": service.hs_token
            })
            if response is not None:  # just an empty json object
                defer.returnValue(True)
        except CodeMessageException as e:
            if e.code == 404:
                defer.returnValue(False)
                return
            logger.warning("query_user to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("query_user to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def query_alias(self, service, alias):
        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
        response = None
        try:
            response = yield self.get_json(uri, {
                "access_token": service.hs_token
            })
            if response is not None:  # just an empty json object
                defer.returnValue(True)
        except CodeMessageException as e:
            logger.warning("query_alias to %s received %s", uri, e.code)
            if e.code == 404:
                defer.returnValue(False)
                return
        except Exception as ex:
            logger.warning("query_alias to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def push_bulk(self, service, events, txn_id=None):
        events = self._serialize(events)

        if txn_id is None:
            logger.warning("push_bulk: Missing txn ID sending events to %s",
                           service.url)
            txn_id = str(0)
        txn_id = str(txn_id)

        uri = service.url + ("/transactions/%s" %
                             urllib.quote(txn_id))
        try:
            yield self.put_json(
                uri=uri,
                json_body={
                    "events": events
                },
                args={
                    "access_token": service.hs_token
                })
            defer.returnValue(True)
            return
        except CodeMessageException as e:
            logger.warning("push_bulk to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("push_bulk to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def push(self, service, event, txn_id=None):
        response = yield self.push_bulk(service, [event], txn_id)
        defer.returnValue(response)

    def _serialize(self, events):
        time_now = self.clock.time_msec()
        return [
            serialize_event(e, time_now, as_client_event=True) for e in events
        ]
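
For orientation, the request push_bulk() issues reduces to the following (URL and transaction ID invented):

    #   PUT <service.url>/transactions/42?access_token=<service.hs_token>
    #   {"events": [<serialized client events>]}
    #
    # A successful response yields True; a CodeMessageException or any other
    # error yields False, which the scheduler treats as the AS being down.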

synapse/appservice/scheduler.py  (new file, 254 lines)
@@ -0,0 +1,254 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module controls the reliability for application service transactions.

The nominal flow through this module looks like:
              __________
1---ASa[e]-->|  Service |--> Queue ASa[f]
2----ASb[e]->|  Queuer  |
3--ASa[f]--->|__________|-----------+ ASa[e], ASb[e]
                                    V
      -````````-            +------------+
      |````````|<--StoreTxn-|Transaction |
      |Database|            | Controller |---> SEND TO AS
      `--------`            +------------+

What happens on SEND TO AS depends on the state of the Application Service:
 - If the AS is marked as DOWN, do nothing.
 - If the AS is marked as UP, send the transaction.
     * SUCCESS : Increment where the AS is up to txn-wise and nuke the txn
                 contents from the db.
     * FAILURE : Marked AS as DOWN and start Recoverer.

Recoverer attempts to recover ASes who have died. The flow for this looks like:
                ,--------------------- backoff++ --------------.
               V                                               |
START ---> Wait exp ------> Get oldest txn ID from ----> FAILURE
           backoff           DB and try to send it
                                 ^                |___________
Mark AS as                       |                            V
UP & quit           +---------- YES                       SUCCESS
    |               |                                         |
    NO <--- Have more txns? <------ Mark txn success & nuke <-+
                                      from db; incr AS pos.
                                         Reset backoff.

This is all tied together by the AppServiceScheduler which DIs the required
components.
"""

from synapse.appservice import ApplicationServiceState
from twisted.internet import defer

import logging

logger = logging.getLogger(__name__)


class AppServiceScheduler(object):
    """ Public facing API for this module. Does the required DI to tie the
    components together. This also serves as the "event_pool", which in this
    case is a simple array.
    """

    def __init__(self, clock, store, as_api):
        self.clock = clock
        self.store = store
        self.as_api = as_api

        def create_recoverer(service, callback):
            return _Recoverer(clock, store, as_api, service, callback)

        self.txn_ctrl = _TransactionController(
            clock, store, as_api, create_recoverer
        )
        self.queuer = _ServiceQueuer(self.txn_ctrl)

    @defer.inlineCallbacks
    def start(self):
        logger.info("Starting appservice scheduler")
        # check for any DOWN ASes and start recoverers for them.
        recoverers = yield _Recoverer.start(
            self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered
        )
        self.txn_ctrl.add_recoverers(recoverers)

    def submit_event_for_as(self, service, event):
        self.queuer.enqueue(service, event)


class _ServiceQueuer(object):
    """Queues events for the same application service together, sending
    transactions as soon as possible. Once a transaction is sent successfully,
    this schedules any other events in the queue to run.
    """

    def __init__(self, txn_ctrl):
        self.queued_events = {}  # dict of {service_id: [events]}
        self.pending_requests = {}  # dict of {service_id: Deferred}
        self.txn_ctrl = txn_ctrl

    def enqueue(self, service, event):
        # if this service isn't being sent something
        if not self.pending_requests.get(service.id):
            self._send_request(service, [event])
        else:
            # add to queue for this service
            if service.id not in self.queued_events:
                self.queued_events[service.id] = []
            self.queued_events[service.id].append(event)

    def _send_request(self, service, events):
        # send request and add callbacks
        d = self.txn_ctrl.send(service, events)
        d.addBoth(self._on_request_finish)
        d.addErrback(self._on_request_fail)
        self.pending_requests[service.id] = d

    def _on_request_finish(self, service):
        self.pending_requests[service.id] = None
        # if there are queued events, then send them.
        if (service.id in self.queued_events
                and len(self.queued_events[service.id]) > 0):
            self._send_request(service, self.queued_events[service.id])
            self.queued_events[service.id] = []

    def _on_request_fail(self, err):
        logger.error("AS request failed: %s", err)


class _TransactionController(object):

    def __init__(self, clock, store, as_api, recoverer_fn):
        self.clock = clock
        self.store = store
        self.as_api = as_api
        self.recoverer_fn = recoverer_fn
        # keep track of how many recoverers there are
        self.recoverers = []

    @defer.inlineCallbacks
    def send(self, service, events):
        try:
            txn = yield self.store.create_appservice_txn(
                service=service,
                events=events
            )
            service_is_up = yield self._is_service_up(service)
            if service_is_up:
                sent = yield txn.send(self.as_api)
                if sent:
                    txn.complete(self.store)
                else:
                    self._start_recoverer(service)
        except Exception as e:
            logger.exception(e)
            self._start_recoverer(service)
        # request has finished
        defer.returnValue(service)

    @defer.inlineCallbacks
    def on_recovered(self, recoverer):
        self.recoverers.remove(recoverer)
        logger.info("Successfully recovered application service AS ID %s",
                    recoverer.service.id)
        logger.info("Remaining active recoverers: %s", len(self.recoverers))
        yield self.store.set_appservice_state(
            recoverer.service,
            ApplicationServiceState.UP
        )

    def add_recoverers(self, recoverers):
        for r in recoverers:
            self.recoverers.append(r)
        if len(recoverers) > 0:
            logger.info("New active recoverers: %s", len(self.recoverers))

    @defer.inlineCallbacks
    def _start_recoverer(self, service):
        yield self.store.set_appservice_state(
            service,
            ApplicationServiceState.DOWN
        )
        logger.info(
            "Application service falling behind. Starting recoverer. AS ID %s",
            service.id
        )
        recoverer = self.recoverer_fn(service, self.on_recovered)
        self.add_recoverers([recoverer])
        recoverer.recover()

    @defer.inlineCallbacks
    def _is_service_up(self, service):
        state = yield self.store.get_appservice_state(service)
        defer.returnValue(state == ApplicationServiceState.UP or state is None)


class _Recoverer(object):

    @staticmethod
    @defer.inlineCallbacks
    def start(clock, store, as_api, callback):
        services = yield store.get_appservices_by_state(
            ApplicationServiceState.DOWN
        )
        recoverers = [
            _Recoverer(clock, store, as_api, s, callback) for s in services
        ]
        for r in recoverers:
            logger.info("Starting recoverer for AS ID %s which was marked as "
                        "DOWN", r.service.id)
            r.recover()
        defer.returnValue(recoverers)

    def __init__(self, clock, store, as_api, service, callback):
        self.clock = clock
        self.store = store
        self.as_api = as_api
        self.service = service
        self.callback = callback
        self.backoff_counter = 1

    def recover(self):
        self.clock.call_later((2 ** self.backoff_counter), self.retry)

    def _backoff(self):
        # cap the backoff to be around 8.5min => (2^9) = 512 secs
        if self.backoff_counter < 9:
            self.backoff_counter += 1
        self.recover()

    @defer.inlineCallbacks
    def retry(self):
        try:
            txn = yield self.store.get_oldest_unsent_txn(self.service)
            if txn:
                logger.info("Retrying transaction %s for AS ID %s",
                            txn.id, txn.service.id)
                sent = yield txn.send(self.as_api)
                if sent:
                    yield txn.complete(self.store)
                    # reset the backoff counter and retry immediately
                    self.backoff_counter = 1
                    yield self.retry()
                else:
                    self._backoff()
            else:
                self._set_service_recovered()
        except Exception as e:
            logger.exception(e)
            self._backoff()

    def _set_service_recovered(self):
        self.callback(self)
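
The retry cadence implied by recover()/_backoff() above, as a quick worked example: the delay doubles per failed pass, and the counter stops growing at 9, capping the wait at 2**9 = 512 seconds:

    for attempt in range(1, 12):
        counter = min(attempt, 9)  # _backoff() stops incrementing at 9
        print "attempt %d: wait %d seconds" % (attempt, 2 ** counter)
    # attempt 1: wait 2 seconds ... attempt 9 onwards: wait 512 seconds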

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

synapse/config/__main__.py  (new file, 35 lines)
@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import ConfigError

if __name__ == "__main__":
    import sys
    from homeserver import HomeServerConfig

    action = sys.argv[1]

    if action == "read":
        key = sys.argv[2]
        try:
            config = HomeServerConfig.load_config("", sys.argv[3:])
        except ConfigError as e:
            sys.stderr.write("\n" + e.message + "\n")
            sys.exit(1)

        print getattr(config, key)
        sys.exit(0)
    else:
        sys.stderr.write("Unknown command %r\n" % (action,))
        sys.exit(1)

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,28 +14,67 @@
# limitations under the License.

import argparse
-import sys
import errno
import os
import yaml
from textwrap import dedent


class ConfigError(Exception):
    pass


-class Config(object):
-    def __init__(self, args):
-        pass
# We split these messages out to allow packages to override with package
# specific instructions.
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\
Please opt in or out of reporting anonymized homeserver usage statistics, by
setting the `report_stats` key in your config file to either True or False.
"""

MISSING_REPORT_STATS_SPIEL = """\
We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.

Thank you.
"""

MISSING_SERVER_NAME = """\
Missing mandatory `server_name` config option.
"""


class Config(object):
    @staticmethod
-    def parse_size(string):
    def parse_size(value):
        if isinstance(value, int) or isinstance(value, long):
            return value
        sizes = {"K": 1024, "M": 1024 * 1024}
        size = 1
-        suffix = string[-1]
        suffix = value[-1]
        if suffix in sizes:
-            string = string[:-1]
            value = value[:-1]
            size = sizes[suffix]
-        return int(string) * size
        return int(value) * size

    @staticmethod
    def parse_duration(value):
        if isinstance(value, int) or isinstance(value, long):
            return value
        second = 1000
        hour = 60 * 60 * second
        day = 24 * hour
        week = 7 * day
        year = 365 * day
        sizes = {"s": second, "h": hour, "d": day, "w": week, "y": year}
        size = 1
        suffix = value[-1]
        if suffix in sizes:
            value = value[:-1]
            size = sizes[suffix]
        return int(value) * size
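
Worked examples for the two parsers above (sizes are bytes with base-1024 suffixes; durations come back in milliseconds):

    Config.parse_size("10M")      # 10 * 1024 * 1024 = 10485760
    Config.parse_size(2048)       # ints (and longs) pass straight through
    Config.parse_duration("30s")  # 30 * 1000 = 30000
    Config.parse_duration("1w")   # 7 * 24 * 60 * 60 * 1000 = 604800000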

    @staticmethod
    def abspath(file_path):
@@ -63,8 +102,11 @@ class Config(object):
    @classmethod
    def ensure_directory(cls, dir_path):
        dir_path = cls.abspath(dir_path)
-        if not os.path.exists(dir_path):
        try:
            os.makedirs(dir_path)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise
        if not os.path.isdir(dir_path):
            raise ConfigError(
                "%s is not a directory" % (dir_path,)
@@ -86,83 +128,211 @@ class Config(object):
        with open(file_path) as file_stream:
            return yaml.load(file_stream)

-    @classmethod
-    def add_arguments(cls, parser):
-        pass
    def invoke_all(self, name, *args, **kargs):
        results = []
        for cls in type(self).mro():
            if name in cls.__dict__:
                results.append(getattr(cls, name)(self, *args, **kargs))
        return results

-    @classmethod
-    def generate_config(cls, args, config_dir_path):
-        pass
    def generate_config(
        self,
        config_dir_path,
        server_name,
        is_generating_file,
        report_stats=None,
    ):
        default_config = "# vim:ft=yaml\n"

        default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
            "default_config",
            config_dir_path=config_dir_path,
            server_name=server_name,
            is_generating_file=is_generating_file,
            report_stats=report_stats,
        ))

        config = yaml.load(default_config)

        return default_config, config

    @classmethod
    def load_config(cls, description, argv, generate_section=None):
        obj = cls()

        config_parser = argparse.ArgumentParser(add_help=False)
        config_parser.add_argument(
            "-c", "--config-path",
            action="append",
            metavar="CONFIG_FILE",
-            help="Specify config file"
            help="Specify config file. Can be given multiple times and"
                 " may specify directories containing *.yaml files."
        )
        config_parser.add_argument(
            "--generate-config",
            action="store_true",
-            help="Generate config file"
            help="Generate a config file for the server name"
        )
        config_parser.add_argument(
            "--report-stats",
            action="store",
            help="Stuff",
            choices=["yes", "no"]
        )
        config_parser.add_argument(
            "--generate-keys",
            action="store_true",
            help="Generate any missing key files then exit"
        )
        config_parser.add_argument(
            "--keys-directory",
            metavar="DIRECTORY",
            help="Used with 'generate-*' options to specify where files such as"
                 " certs and signing keys should be stored in, unless explicitly"
                 " specified in the config."
        )
        config_parser.add_argument(
            "-H", "--server-name",
            help="The server name to generate a config file for"
        )
        config_args, remaining_args = config_parser.parse_known_args(argv)

        generate_keys = config_args.generate_keys

        config_files = []
        if config_args.config_path:
            for config_path in config_args.config_path:
                if os.path.isdir(config_path):
                    # We accept specifying directories as config paths, we search
                    # inside that directory for all files matching *.yaml, and then
                    # we apply them in *sorted* order.
                    files = []
                    for entry in os.listdir(config_path):
                        entry_path = os.path.join(config_path, entry)
                        if not os.path.isfile(entry_path):
                            print (
                                "Found subdirectory in config directory: %r. IGNORING."
                            ) % (entry_path, )
                            continue

                        if not entry.endswith(".yaml"):
                            print (
                                "Found file in config directory that does not"
                                " end in '.yaml': %r. IGNORING."
                            ) % (entry_path, )
                            continue

                        files.append(entry_path)

                    config_files.extend(sorted(files))
                else:
                    config_files.append(config_path)

        if config_args.generate_config:
-            if not config_args.config_path:
            if config_args.report_stats is None:
                config_parser.error(
-                    "Must specify where to generate the config file"
                    "Please specify either --report-stats=yes or --report-stats=no\n\n" +
                    MISSING_REPORT_STATS_SPIEL
                )
-            config_dir_path = os.path.dirname(config_args.config_path)
-            if os.path.exists(config_args.config_path):
-                defaults = cls.read_config_file(config_args.config_path)
-            else:
-                defaults = {}
-        else:
-            if config_args.config_path:
-                defaults = cls.read_config_file(config_args.config_path)
-            else:
-                defaults = {}
            if not config_files:
                config_parser.error(
                    "Must supply a config file.\nA config file can be automatically"
                    " generated using \"--generate-config -H SERVER_NAME"
                    " -c CONFIG-FILE\""
                )
            (config_path,) = config_files
            if not os.path.exists(config_path):
                if config_args.keys_directory:
                    config_dir_path = config_args.keys_directory
                else:
                    config_dir_path = os.path.dirname(config_path)
                config_dir_path = os.path.abspath(config_dir_path)

                server_name = config_args.server_name
                if not server_name:
                    raise ConfigError(
                        "Must specify a server_name to a generate config for."
                        " Pass -H server.name."
                    )
                if not os.path.exists(config_dir_path):
                    os.makedirs(config_dir_path)
                with open(config_path, "wb") as config_file:
                    config_bytes, config = obj.generate_config(
                        config_dir_path=config_dir_path,
                        server_name=server_name,
                        report_stats=(config_args.report_stats == "yes"),
                        is_generating_file=True
                    )
                    obj.invoke_all("generate_files", config)
                    config_file.write(config_bytes)
                print (
                    "A config file has been generated in %r for server name"
                    " %r with corresponding SSL keys and self-signed"
                    " certificates. Please review this file and customise it"
                    " to your needs."
                ) % (config_path, server_name)
                print (
                    "If this server name is incorrect, you will need to"
                    " regenerate the SSL certificates"
                )
                return
            else:
                print (
                    "Config file %r already exists. Generating any missing key"
                    " files."
                ) % (config_path,)
                generate_keys = True

        parser = argparse.ArgumentParser(
            parents=[config_parser],
            description=description,
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
-        cls.add_arguments(parser)
-        parser.set_defaults(**defaults)
        obj.invoke_all("add_arguments", parser)
        args = parser.parse_args(remaining_args)

-        if config_args.generate_config:
-            config_dir_path = os.path.dirname(config_args.config_path)
-            config_dir_path = os.path.abspath(config_dir_path)
-            if not os.path.exists(config_dir_path):
-                os.makedirs(config_dir_path)
-            cls.generate_config(args, config_dir_path)
-            config = {}
-            for key, value in vars(args).items():
-                if (key not in set(["config_path", "generate_config"])
-                        and value is not None):
-                    config[key] = value
-            with open(config_args.config_path, "w") as config_file:
-                # TODO(paul) it would be lovely if we wrote out vim- and emacs-
-                # style mode markers into the file, to hint to people that
-                # this is a YAML file.
-                yaml.dump(config, config_file, default_flow_style=False)
-            print (
-                "A config file has been generated in %s for server name"
-                " '%s' with corresponding SSL keys and self-signed"
-                " certificates. Please review this file and customise it to"
-                " your needs."
-            ) % (
-                config_args.config_path, config['server_name']
-            )
-            print (
-                "If this server name is incorrect, you will need to regenerate"
-                " the SSL certificates"
-            )
-            sys.exit(0)
-
-        return cls(args)
        if not config_files:
            config_parser.error(
                "Must supply a config file.\nA config file can be automatically"
                " generated using \"--generate-config -H SERVER_NAME"
                " -c CONFIG-FILE\""
            )

        if config_args.keys_directory:
            config_dir_path = config_args.keys_directory
        else:
            config_dir_path = os.path.dirname(config_args.config_path[-1])
        config_dir_path = os.path.abspath(config_dir_path)

        specified_config = {}
        for config_file in config_files:
            yaml_config = cls.read_config_file(config_file)
            specified_config.update(yaml_config)

        if "server_name" not in specified_config:
            raise ConfigError(MISSING_SERVER_NAME)

        server_name = specified_config["server_name"]
        _, config = obj.generate_config(
            config_dir_path=config_dir_path,
            server_name=server_name,
            is_generating_file=False,
        )
        config.pop("log_config")
        config.update(specified_config)
        if "report_stats" not in config:
            raise ConfigError(
                MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
                MISSING_REPORT_STATS_SPIEL
            )
|
if generate_keys:
|
||||||
|
obj.invoke_all("generate_files", config)
|
||||||
|
return
|
||||||
|
|
||||||
|
obj.invoke_all("read_config", config)
|
||||||
|
|
||||||
|
obj.invoke_all("read_arguments", args)
|
||||||
|
|
||||||
|
return obj
|
||||||
|
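The loader above never hardcodes which config classes exist: it calls invoke_all, which walks the method resolution order of the combined config object and fires the named hook (add_arguments, generate_files, read_config, read_arguments) on every class that defines it. A minimal sketch of that dispatch, with hypothetical FooConfig/BarConfig mixins standing in for the real config classes:

# Minimal sketch of the invoke_all pattern used above; FooConfig and
# BarConfig are hypothetical stand-ins for synapse's config mixins.
class Config(object):
    def invoke_all(self, name, *args, **kargs):
        results = []
        for cls in type(self).mro():
            # Only call the hook on classes that define it themselves,
            # so each mixin's hook runs exactly once.
            if name in cls.__dict__:
                results.append(cls.__dict__[name](self, *args, **kargs))
        return results

class FooConfig(Config):
    def read_config(self, config):
        self.foo = config.get("foo")

class BarConfig(Config):
    def read_config(self, config):
        self.bar = config.get("bar")

class CombinedConfig(FooConfig, BarConfig):
    pass

combined = CombinedConfig()
combined.invoke_all("read_config", {"foo": 1, "bar": 2})
assert (combined.foo, combined.bar) == (1, 2)

HomeServerConfig (further down in this diff) is exactly such a pile of mixins, so adding a new config section is just adding a class to its bases.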
synapse/config/appservice.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class AppServiceConfig(Config):
+
+    def read_config(self, config):
+        self.app_service_config_files = config.get("app_service_config_files", [])
+
+    def default_config(cls, **kwargs):
+        return """\
+        # A list of application service config files to use
+        app_service_config_files: []
+        """
synapse/config/captcha.py
@@ -1,4 +1,4 @@
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,35 +17,31 @@ from ._base import Config
 
 class CaptchaConfig(Config):
 
-    def __init__(self, args):
-        super(CaptchaConfig, self).__init__(args)
-        self.recaptcha_private_key = args.recaptcha_private_key
-        self.enable_registration_captcha = args.enable_registration_captcha
-        self.captcha_ip_origin_is_x_forwarded = (
-            args.captcha_ip_origin_is_x_forwarded
-        )
-        self.captcha_bypass_secret = args.captcha_bypass_secret
+    def read_config(self, config):
+        self.recaptcha_private_key = config["recaptcha_private_key"]
+        self.recaptcha_public_key = config["recaptcha_public_key"]
+        self.enable_registration_captcha = config["enable_registration_captcha"]
+        self.captcha_bypass_secret = config.get("captcha_bypass_secret")
+        self.recaptcha_siteverify_api = config["recaptcha_siteverify_api"]
 
-    @classmethod
-    def add_arguments(cls, parser):
-        super(CaptchaConfig, cls).add_arguments(parser)
-        group = parser.add_argument_group("recaptcha")
-        group.add_argument(
-            "--recaptcha-private-key", type=str, default="YOUR_PRIVATE_KEY",
-            help="The matching private key for the web client's public key."
-        )
-        group.add_argument(
-            "--enable-registration-captcha", type=bool, default=False,
-            help="Enables ReCaptcha checks when registering, preventing signup"
-            + " unless a captcha is answered. Requires a valid ReCaptcha "
-            + "public/private key."
-        )
-        group.add_argument(
-            "--captcha_ip_origin_is_x_forwarded", type=bool, default=False,
-            help="When checking captchas, use the X-Forwarded-For (XFF) header"
-            + " as the client IP and not the actual client IP."
-        )
-        group.add_argument(
-            "--captcha_bypass_secret", type=str,
-            help="A secret key used to bypass the captcha test entirely."
-        )
+    def default_config(self, **kwargs):
+        return """\
+        ## Captcha ##
+
+        # This Home Server's ReCAPTCHA public key.
+        recaptcha_public_key: "YOUR_PUBLIC_KEY"
+
+        # This Home Server's ReCAPTCHA private key.
+        recaptcha_private_key: "YOUR_PRIVATE_KEY"
+
+        # Enables ReCaptcha checks when registering, preventing signup
+        # unless a captcha is answered. Requires a valid ReCaptcha
+        # public/private key.
+        enable_registration_captcha: False
+
+        # A secret key used to bypass the captcha test entirely.
+        #captcha_bypass_secret: "YOUR_SECRET_HERE"
+
+        # The API endpoint to use for verifying m.login.recaptcha responses.
+        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+        """
synapse/config/cas.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class CasConfig(Config):
+    """Cas Configuration
+
+    cas_server_url: URL of CAS server
+    """
+
+    def read_config(self, config):
+        cas_config = config.get("cas_config", None)
+        if cas_config:
+            self.cas_enabled = cas_config.get("enabled", True)
+            self.cas_server_url = cas_config["server_url"]
+            self.cas_service_url = cas_config["service_url"]
+            self.cas_required_attributes = cas_config.get("required_attributes", {})
+        else:
+            self.cas_enabled = False
+            self.cas_server_url = None
+            self.cas_service_url = None
+            self.cas_required_attributes = {}
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Enable CAS for registration and login.
+        #cas_config:
+        #   enabled: true
+        #   server_url: "https://cas-server.com"
+        #   service_url: "https://homeserver.domain.com:8448"
+        #   #required_attributes:
+        #   #    name: value
+        """
synapse/config/database.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,32 +14,66 @@
 # limitations under the License.
 
 from ._base import Config
-import os
 
 
 class DatabaseConfig(Config):
-    def __init__(self, args):
-        super(DatabaseConfig, self).__init__(args)
-        if args.database_path == ":memory:":
-            self.database_path = ":memory:"
-        else:
-            self.database_path = self.abspath(args.database_path)
-        self.event_cache_size = self.parse_size(args.event_cache_size)
 
-    @classmethod
-    def add_arguments(cls, parser):
-        super(DatabaseConfig, cls).add_arguments(parser)
+    def read_config(self, config):
+        self.event_cache_size = self.parse_size(
+            config.get("event_cache_size", "10K")
+        )
+
+        self.database_config = config.get("database")
+
+        if self.database_config is None:
+            self.database_config = {
+                "name": "sqlite3",
+                "args": {},
+            }
+
+        name = self.database_config.get("name", None)
+        if name == "psycopg2":
+            pass
+        elif name == "sqlite3":
+            self.database_config.setdefault("args", {}).update({
+                "cp_min": 1,
+                "cp_max": 1,
+                "check_same_thread": False,
+            })
+        else:
+            raise RuntimeError("Unsupported database type '%s'" % (name,))
+
+        self.set_databasepath(config.get("database_path"))
+
+    def default_config(self, **kwargs):
+        database_path = self.abspath("homeserver.db")
+        return """\
+        # Database configuration
+        database:
+            # The database engine name
+            name: "sqlite3"
+            # Arguments to pass to the engine
+            args:
+                # Path to the database
+                database: "%(database_path)s"
+
+        # Number of events to cache in memory.
+        event_cache_size: "10K"
+        """ % locals()
+
+    def read_arguments(self, args):
+        self.set_databasepath(args.database_path)
+
+    def set_databasepath(self, database_path):
+        if database_path != ":memory:":
+            database_path = self.abspath(database_path)
+        if self.database_config.get("name", None) == "sqlite3":
+            if database_path is not None:
+                self.database_config["args"]["database"] = database_path
+
+    def add_arguments(self, parser):
         db_group = parser.add_argument_group("database")
         db_group.add_argument(
-            "-d", "--database-path", default="homeserver.db",
-            help="The database name."
+            "-d", "--database-path", metavar="SQLITE_DATABASE_PATH",
+            help="The path to a sqlite database to use."
         )
-        db_group.add_argument(
-            "--event-cache-size", default="100K",
-            help="Number of events to cache in memory."
-        )
-
-    @classmethod
-    def generate_config(cls, args, config_dir_path):
-        super(DatabaseConfig, cls).generate_config(args, config_dir_path)
-        args.database_path = os.path.abspath(args.database_path)
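The sqlite3 branch of read_config above pins the twisted connection pool to a single connection. A sketch of what the expanded database_config looks like when the YAML omits the database section entirely; the helper below just restates the logic above for illustration, it is not a synapse API:

# Restatement of DatabaseConfig.read_config's expansion for a missing
# "database" section (hypothetical helper, for illustration only).
def expand_database_config(config):
    database_config = config.get("database")
    if database_config is None:
        database_config = {"name": "sqlite3", "args": {}}
    if database_config.get("name") == "sqlite3":
        # cp_min/cp_max pin the twisted connection pool to one connection,
        # since sqlite does not cope well with concurrent writers.
        database_config.setdefault("args", {}).update({
            "cp_min": 1,
            "cp_max": 1,
            "check_same_thread": False,
        })
    return database_config

print(expand_database_config({}))
# {'name': 'sqlite3',
#  'args': {'cp_min': 1, 'cp_max': 1, 'check_same_thread': False}}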
synapse/config/email.py (deleted file, 42 lines)
@@ -1,42 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ._base import Config
-
-
-class EmailConfig(Config):
-
-    def __init__(self, args):
-        super(EmailConfig, self).__init__(args)
-        self.email_from_address = args.email_from_address
-        self.email_smtp_server = args.email_smtp_server
-
-    @classmethod
-    def add_arguments(cls, parser):
-        super(EmailConfig, cls).add_arguments(parser)
-        email_group = parser.add_argument_group("email")
-        email_group.add_argument(
-            "--email-from-address",
-            default="FROM@EXAMPLE.COM",
-            help="The address to send emails from (e.g. for password resets)."
-        )
-        email_group.add_argument(
-            "--email-smtp-server",
-            default="",
-            help=(
-                "The SMTP server to send emails from (e.g. for password"
-                " resets)."
-            )
-        )
synapse/config/homeserver.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,16 +20,26 @@ from .database import DatabaseConfig
 from .ratelimiting import RatelimitConfig
 from .repository import ContentRepositoryConfig
 from .captcha import CaptchaConfig
-from .email import EmailConfig
 from .voip import VoipConfig
+from .registration import RegistrationConfig
+from .metrics import MetricsConfig
+from .appservice import AppServiceConfig
+from .key import KeyConfig
+from .saml2 import SAML2Config
+from .cas import CasConfig
+from .password import PasswordConfig
 
 
 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
-                       EmailConfig, VoipConfig):
+                       VoipConfig, RegistrationConfig, MetricsConfig,
+                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
+                       PasswordConfig,):
     pass
 
 
 if __name__ == '__main__':
     import sys
-    HomeServerConfig.load_config("Generate config", sys.argv[1:], "HomeServer")
+    sys.stdout.write(
+        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0]
+    )
synapse/config/key.py (new file, 156 lines)
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config, ConfigError
+
+from synapse.util.stringutils import random_string
+from signedjson.key import (
+    generate_signing_key, is_signing_algorithm_supported,
+    decode_signing_key_base64, decode_verify_key_bytes,
+    read_signing_keys, write_signing_keys, NACL_ED25519
+)
+from unpaddedbase64 import decode_base64
+from synapse.util.stringutils import random_string_with_symbols
+
+import os
+import hashlib
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class KeyConfig(Config):
+
+    def read_config(self, config):
+        self.signing_key = self.read_signing_key(config["signing_key_path"])
+        self.old_signing_keys = self.read_old_signing_keys(
+            config["old_signing_keys"]
+        )
+        self.key_refresh_interval = self.parse_duration(
+            config["key_refresh_interval"]
+        )
+        self.perspectives = self.read_perspectives(
+            config["perspectives"]
+        )
+
+        self.macaroon_secret_key = config.get(
+            "macaroon_secret_key", self.registration_shared_secret
+        )
+
+        if not self.macaroon_secret_key:
+            # Unfortunately, there are people out there that don't have this
+            # set. Lets just be "nice" and derive one from their secret key.
+            logger.warn("Config is missing macaroon_secret_key")
+            seed = self.signing_key[0].seed
+            self.macaroon_secret_key = hashlib.sha256(seed)
+
+    def default_config(self, config_dir_path, server_name, is_generating_file=False,
+                       **kwargs):
+        base_key_name = os.path.join(config_dir_path, server_name)
+
+        if is_generating_file:
+            macaroon_secret_key = random_string_with_symbols(50)
+        else:
+            macaroon_secret_key = None
+
+        return """\
+        macaroon_secret_key: "%(macaroon_secret_key)s"
+
+        ## Signing Keys ##
+
+        # Path to the signing key to sign messages with
+        signing_key_path: "%(base_key_name)s.signing.key"
+
+        # The keys that the server used to sign messages with but won't use
+        # to sign new messages. E.g. it has lost its private key
+        old_signing_keys: {}
+        #  "ed25519:auto":
+        #    # Base64 encoded public key
+        #    key: "The public part of your old signing key."
+        #    # Millisecond POSIX timestamp when the key expired.
+        #    expired_ts: 123456789123
+
+        # How long key response published by this server is valid for.
+        # Used to set the valid_until_ts in /key/v2 APIs.
+        # Determines how quickly servers will query to check which keys
+        # are still valid.
+        key_refresh_interval: "1d" # 1 Day.
+
+        # The trusted servers to download signing keys from.
+        perspectives:
+          servers:
+            "matrix.org":
+              verify_keys:
+                "ed25519:auto":
+                  key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+        """ % locals()
+
+    def read_perspectives(self, perspectives_config):
+        servers = {}
+        for server_name, server_config in perspectives_config["servers"].items():
+            for key_id, key_data in server_config["verify_keys"].items():
+                if is_signing_algorithm_supported(key_id):
+                    key_base64 = key_data["key"]
+                    key_bytes = decode_base64(key_base64)
+                    verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                    servers.setdefault(server_name, {})[key_id] = verify_key
+        return servers
+
+    def read_signing_key(self, signing_key_path):
+        signing_keys = self.read_file(signing_key_path, "signing_key")
+        try:
+            return read_signing_keys(signing_keys.splitlines(True))
+        except Exception:
+            raise ConfigError(
+                "Error reading signing_key."
+                " Try running again with --generate-config"
+            )
+
+    def read_old_signing_keys(self, old_signing_keys):
+        keys = {}
+        for key_id, key_data in old_signing_keys.items():
+            if is_signing_algorithm_supported(key_id):
+                key_base64 = key_data["key"]
+                key_bytes = decode_base64(key_base64)
+                verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                verify_key.expired_ts = key_data["expired_ts"]
+                keys[key_id] = verify_key
+            else:
+                raise ConfigError(
+                    "Unsupported signing algorithm for old key: %r" % (key_id,)
+                )
+        return keys
+
+    def generate_files(self, config):
+        signing_key_path = config["signing_key_path"]
+        if not os.path.exists(signing_key_path):
+            with open(signing_key_path, "w") as signing_key_file:
+                key_id = "a_" + random_string(4)
+                write_signing_keys(
+                    signing_key_file, (generate_signing_key(key_id),),
+                )
+        else:
+            signing_keys = self.read_file(signing_key_path, "signing_key")
+            if len(signing_keys.split("\n")[0].split()) == 1:
+                # handle keys in the old format.
+                key_id = "a_" + random_string(4)
+                key = decode_signing_key_base64(
+                    NACL_ED25519, key_id, signing_keys.split("\n")[0]
+                )
+                with open(signing_key_path, "w") as signing_key_file:
+                    write_signing_keys(
+                        signing_key_file, (key,),
+                    )
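generate_files above leans on the signedjson package for the on-disk key format. A short round-trip sketch, assuming signedjson is installed; the path and version string here are illustrative, not taken from synapse:

# Round trip of the signing-key file format used by generate_files above.
from signedjson.key import (
    generate_signing_key, write_signing_keys, read_signing_keys,
)

key = generate_signing_key("a_xxxx")           # version string, as in key_id above
with open("/tmp/example.signing.key", "w") as f:
    write_signing_keys(f, (key,))              # one "ed25519 <version> <base64>" line

with open("/tmp/example.signing.key") as f:
    (loaded,) = read_signing_keys(f)
assert loaded.version == key.version

The old-format branch in generate_files handles files that contain only the bare base64 seed (one whitespace-separated token per line) and rewrites them into this three-token format.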
synapse/config/logger.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,25 +19,97 @@ from twisted.python.log import PythonLoggingObserver
 import logging
 import logging.config
 import yaml
+from string import Template
+import os
+import signal
+from synapse.util.debug import debug_deferreds
+
+
+DEFAULT_LOG_CONFIG = Template("""
+version: 1
+
+formatters:
+  precise:
+   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s\
+- %(message)s'
+
+filters:
+  context:
+    (): synapse.util.logcontext.LoggingContextFilter
+    request: ""
+
+handlers:
+  file:
+    class: logging.handlers.RotatingFileHandler
+    formatter: precise
+    filename: ${log_file}
+    maxBytes: 104857600
+    backupCount: 10
+    filters: [context]
+    level: INFO
+  console:
+    class: logging.StreamHandler
+    formatter: precise
+
+loggers:
+    synapse:
+        level: INFO
+
+    synapse.storage.SQL:
+        level: INFO
+
+root:
+    level: INFO
+    handlers: [file, console]
+""")
 
 
 class LoggingConfig(Config):
-    def __init__(self, args):
-        super(LoggingConfig, self).__init__(args)
-        self.verbosity = int(args.verbose) if args.verbose else None
-        self.log_config = self.abspath(args.log_config)
-        self.log_file = self.abspath(args.log_file)
 
-    @classmethod
+    def read_config(self, config):
+        self.verbosity = config.get("verbose", 0)
+        self.log_config = self.abspath(config.get("log_config"))
+        self.log_file = self.abspath(config.get("log_file"))
+        if config.get("full_twisted_stacktraces"):
+            debug_deferreds()
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        log_file = self.abspath("homeserver.log")
+        log_config = self.abspath(
+            os.path.join(config_dir_path, server_name + ".log.config")
+        )
+        return """
+        # Logging verbosity level.
+        verbose: 0
+
+        # File to write logging to
+        log_file: "%(log_file)s"
+
+        # A yaml python logging config file
+        log_config: "%(log_config)s"
+
+        # Stop twisted from discarding the stack traces of exceptions in
+        # deferreds by waiting a reactor tick before running a deferred's
+        # callbacks.
+        # full_twisted_stacktraces: true
+        """ % locals()
+
+    def read_arguments(self, args):
+        if args.verbose is not None:
+            self.verbosity = args.verbose
+        if args.log_config is not None:
+            self.log_config = args.log_config
+        if args.log_file is not None:
+            self.log_file = args.log_file
+
     def add_arguments(cls, parser):
-        super(LoggingConfig, cls).add_arguments(parser)
         logging_group = parser.add_argument_group("logging")
         logging_group.add_argument(
             '-v', '--verbose', dest="verbose", action='count',
             help="The verbosity level."
         )
         logging_group.add_argument(
-            '-f', '--log-file', dest="log_file", default="homeserver.log",
+            '-f', '--log-file', dest="log_file",
             help="File to log to."
         )
         logging_group.add_argument(
@@ -45,6 +117,14 @@ class LoggingConfig(Config):
             help="Python logging config file"
         )
 
+    def generate_files(self, config):
+        log_config = config.get("log_config")
+        if log_config and not os.path.exists(log_config):
+            with open(log_config, "wb") as log_config_file:
+                log_config_file.write(
+                    DEFAULT_LOG_CONFIG.substitute(log_file=config["log_file"])
+                )
+
     def setup_logging(self):
         log_format = (
             "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
@@ -71,6 +151,19 @@ class LoggingConfig(Config):
                 handler = logging.handlers.RotatingFileHandler(
                     self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
                 )
+
+                def sighup(signum, stack):
+                    logger.info("Closing log file due to SIGHUP")
+                    handler.doRollover()
+                    logger.info("Opened new log file due to SIGHUP")
+
+                # TODO(paul): obviously this is a terrible mechanism for
+                #   stealing SIGHUP, because it means no other part of synapse
+                #   can use it instead. If we want to catch SIGHUP anywhere
+                #   else as well, I'd suggest we find a nicer way to broadcast
+                #   it around.
+                if getattr(signal, "SIGHUP"):
+                    signal.signal(signal.SIGHUP, sighup)
             else:
                 handler = logging.StreamHandler()
             handler.setFormatter(formatter)
@@ -78,7 +171,6 @@ class LoggingConfig(Config):
             handler.addFilter(LoggingContextFilter(request=""))
 
             logger.addHandler(handler)
-            logger.info("Test")
         else:
             with open(self.log_config, 'r') as f:
                 logging.config.dictConfig(yaml.load(f))
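generate_files above fills the ${log_file} placeholder with the standard library's string.Template, and setup_logging later parses the result with yaml and hands it to logging.config.dictConfig. A cut-down sketch of that substitution; the template here keeps only the relevant handler:

# How the ${log_file} placeholder in DEFAULT_LOG_CONFIG above is filled
# in and consumed (template reduced to the relevant handler).
from string import Template
import yaml

template = Template("""
version: 1
handlers:
  file:
    class: logging.handlers.RotatingFileHandler
    filename: ${log_file}
root:
  handlers: [file]
""")

rendered = template.substitute(log_file="/var/log/synapse/homeserver.log")
parsed = yaml.load(rendered)  # setup_logging feeds this to logging.config.dictConfig
assert parsed["handlers"]["file"]["filename"] == "/var/log/synapse/homeserver.log"

Using Template rather than %-formatting matters here: the YAML itself is full of literal %(asctime)s-style tokens for the log formatter, which %-formatting would try to interpolate.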
synapse/config/metrics.py (new file, 33 lines)
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class MetricsConfig(Config):
+    def read_config(self, config):
+        self.enable_metrics = config["enable_metrics"]
+        self.report_stats = config.get("report_stats", None)
+        self.metrics_port = config.get("metrics_port")
+        self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
+
+    def default_config(self, report_stats=None, **kwargs):
+        suffix = "" if report_stats is None else "report_stats: %(report_stats)s\n"
+        return ("""\
+        ## Metrics ##
+
+        # Enable collection and rendering of performance metrics
+        enable_metrics: False
+        """ + suffix) % locals()
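The suffix dance in default_config above means a freshly generated config only contains a report_stats line once the operator has actually answered --report-stats. A standalone restatement of the two outcomes:

# Restatement of the suffix logic in MetricsConfig.default_config above.
def metrics_default_config(report_stats=None):
    suffix = "" if report_stats is None else "report_stats: %(report_stats)s\n"
    return ("""\
## Metrics ##

# Enable collection and rendering of performance metrics
enable_metrics: False
""" + suffix) % locals()

print(metrics_default_config())                   # no report_stats line at all
print(metrics_default_config(report_stats=True))  # ends with "report_stats: True"

Since the base string contains no % placeholders, % locals() is a no-op unless the suffix was appended; this pairs with the loader in _base.py, which refuses to start when report_stats is missing from the final config.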
synapse/config/password.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class PasswordConfig(Config):
+    """Password login configuration
+    """
+
+    def read_config(self, config):
+        password_config = config.get("password_config", {})
+        self.password_enabled = password_config.get("enabled", True)
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Enable password for login.
+        password_config:
+           enabled: true
+        """
synapse/config/ratelimiting.py
@@ -1,4 +1,4 @@
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,20 +17,42 @@ from ._base import Config
 
 class RatelimitConfig(Config):
 
-    def __init__(self, args):
-        super(RatelimitConfig, self).__init__(args)
-        self.rc_messages_per_second = args.rc_messages_per_second
-        self.rc_message_burst_count = args.rc_message_burst_count
+    def read_config(self, config):
+        self.rc_messages_per_second = config["rc_messages_per_second"]
+        self.rc_message_burst_count = config["rc_message_burst_count"]
 
-    @classmethod
-    def add_arguments(cls, parser):
-        super(RatelimitConfig, cls).add_arguments(parser)
-        rc_group = parser.add_argument_group("ratelimiting")
-        rc_group.add_argument(
-            "--rc-messages-per-second", type=float, default=0.2,
-            help="number of messages a client can send per second"
-        )
-        rc_group.add_argument(
-            "--rc-message-burst-count", type=float, default=10,
-            help="number of message a client can send before being throttled"
-        )
+        self.federation_rc_window_size = config["federation_rc_window_size"]
+        self.federation_rc_sleep_limit = config["federation_rc_sleep_limit"]
+        self.federation_rc_sleep_delay = config["federation_rc_sleep_delay"]
+        self.federation_rc_reject_limit = config["federation_rc_reject_limit"]
+        self.federation_rc_concurrent = config["federation_rc_concurrent"]
+
+    def default_config(self, **kwargs):
+        return """\
+        ## Ratelimiting ##
+
+        # Number of messages a client can send per second
+        rc_messages_per_second: 0.2
+
+        # Number of messages a client can send before being throttled
+        rc_message_burst_count: 10.0
+
+        # The federation window size in milliseconds
+        federation_rc_window_size: 1000
+
+        # The number of federation requests from a single server in a window
+        # before the server will delay processing the request.
+        federation_rc_sleep_limit: 10
+
+        # The duration in milliseconds to delay processing events from
+        # remote servers by if they go over the sleep limit.
+        federation_rc_sleep_delay: 500
+
+        # The maximum number of concurrent federation requests allowed
+        # from a single server
+        federation_rc_reject_limit: 50
+
+        # The number of federation requests to concurrently process from a
+        # single server
+        federation_rc_concurrent: 3
+        """
synapse/config/registration.py (new file, 81 lines)
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+from synapse.util.stringutils import random_string_with_symbols
+
+from distutils.util import strtobool
+
+
+class RegistrationConfig(Config):
+
+    def read_config(self, config):
+        self.enable_registration = bool(
+            strtobool(str(config["enable_registration"]))
+        )
+        if "disable_registration" in config:
+            self.enable_registration = not bool(
+                strtobool(str(config["disable_registration"]))
+            )
+
+        self.registration_shared_secret = config.get("registration_shared_secret")
+
+        self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
+        self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
+        self.allow_guest_access = config.get("allow_guest_access", False)
+
+    def default_config(self, **kwargs):
+        registration_shared_secret = random_string_with_symbols(50)
+
+        return """\
+        ## Registration ##
+
+        # Enable registration for new users.
+        enable_registration: False
+
+        # If set, allows registration by anyone who also has the shared
+        # secret, even if registration is otherwise disabled.
+        registration_shared_secret: "%(registration_shared_secret)s"
+
+        # Set the number of bcrypt rounds used to generate password hash.
+        # Larger numbers increase the work factor needed to generate the hash.
+        # The default number of rounds is 12.
+        bcrypt_rounds: 12
+
+        # Allows users to register as guests without a password/email/etc, and
+        # participate in rooms hosted on this server which have been made
+        # accessible to anonymous users.
+        allow_guest_access: False
+
+        # The list of identity servers trusted to verify third party
+        # identifiers by this server.
+        trusted_third_party_id_servers:
+            - matrix.org
+            - vector.im
+        """ % locals()
+
+    def add_arguments(self, parser):
+        reg_group = parser.add_argument_group("registration")
+        reg_group.add_argument(
+            "--enable-registration", action="store_true", default=None,
+            help="Enable registration for new users."
+        )
+
+    def read_arguments(self, args):
+        if args.enable_registration is not None:
+            self.enable_registration = bool(
+                strtobool(str(args.enable_registration))
+            )
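Routing enable_registration through str() and strtobool, as read_config does above, accepts YAML booleans as well as quoted strings such as "yes" or "False", and rejects anything else loudly:

# Why RegistrationConfig.read_config above uses strtobool(str(...)):
# YAML may hand back a bool, or a quoted string such as "yes" or "False".
from distutils.util import strtobool

for raw in (True, False, "yes", "no", "True", "false", "1", "0"):
    print(raw, bool(strtobool(str(raw))))
# strtobool raises ValueError for anything it does not recognise, so a
# typo like "ture" fails at startup instead of silently disabling signup.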
synapse/config/repository.py
@@ -14,35 +14,87 @@
 # limitations under the License.
 
 from ._base import Config
+from collections import namedtuple
+
+ThumbnailRequirement = namedtuple(
+    "ThumbnailRequirement", ["width", "height", "method", "media_type"]
+)
+
+
+def parse_thumbnail_requirements(thumbnail_sizes):
+    """ Takes a list of dictionaries with "width", "height", and "method" keys
+    and creates a map from image media types to the thumbnail size, thumbnailing
+    method, and thumbnail media type to precalculate
+
+    Args:
+        thumbnail_sizes(list): List of dicts with "width", "height", and
+            "method" keys
+    Returns:
+        Dictionary mapping from media type string to list of
+        ThumbnailRequirement tuples.
+    """
+    requirements = {}
+    for size in thumbnail_sizes:
+        width = size["width"]
+        height = size["height"]
+        method = size["method"]
+        jpeg_thumbnail = ThumbnailRequirement(width, height, method, "image/jpeg")
+        png_thumbnail = ThumbnailRequirement(width, height, method, "image/png")
+        requirements.setdefault("image/jpeg", []).append(jpeg_thumbnail)
+        requirements.setdefault("image/gif", []).append(png_thumbnail)
+        requirements.setdefault("image/png", []).append(png_thumbnail)
+    return {
+        media_type: tuple(thumbnails)
+        for media_type, thumbnails in requirements.items()
+    }
+
 
 class ContentRepositoryConfig(Config):
-    def __init__(self, args):
-        super(ContentRepositoryConfig, self).__init__(args)
-        self.max_upload_size = self.parse_size(args.max_upload_size)
-        self.max_image_pixels = self.parse_size(args.max_image_pixels)
-        self.media_store_path = self.ensure_directory(args.media_store_path)
+    def read_config(self, config):
+        self.max_upload_size = self.parse_size(config["max_upload_size"])
+        self.max_image_pixels = self.parse_size(config["max_image_pixels"])
+        self.media_store_path = self.ensure_directory(config["media_store_path"])
+        self.uploads_path = self.ensure_directory(config["uploads_path"])
+        self.dynamic_thumbnails = config["dynamic_thumbnails"]
+        self.thumbnail_requirements = parse_thumbnail_requirements(
+            config["thumbnail_sizes"]
+        )
 
-    def parse_size(self, string):
-        sizes = {"K": 1024, "M": 1024 * 1024}
-        size = 1
-        suffix = string[-1]
-        if suffix in sizes:
-            string = string[:-1]
-            size = sizes[suffix]
-        return int(string) * size
-
-    @classmethod
-    def add_arguments(cls, parser):
-        super(ContentRepositoryConfig, cls).add_arguments(parser)
-        db_group = parser.add_argument_group("content_repository")
-        db_group.add_argument(
-            "--max-upload-size", default="10M"
-        )
-        db_group.add_argument(
-            "--media-store-path", default=cls.default_path("media_store")
-        )
-        db_group.add_argument(
-            "--max-image-pixels", default="32M",
-            help="Maximum number of pixels that will be thumbnailed"
-        )
+    def default_config(self, **kwargs):
+        media_store = self.default_path("media_store")
+        uploads_path = self.default_path("uploads")
+        return """
+        # Directory where uploaded images and attachments are stored.
+        media_store_path: "%(media_store)s"
+
+        # Directory where in-progress uploads are stored.
+        uploads_path: "%(uploads_path)s"
+
+        # The largest allowed upload size in bytes
+        max_upload_size: "10M"
+
+        # Maximum number of pixels that will be thumbnailed
+        max_image_pixels: "32M"
+
+        # Whether to generate new thumbnails on the fly to precisely match
+        # the resolution requested by the client. If true then whenever
+        # a new resolution is requested by the client the server will
+        # generate a new thumbnail. If false the server will pick a thumbnail
+        # from a precalculated list.
+        dynamic_thumbnails: false
+
+        # List of thumbnails to precalculate when an image is uploaded.
+        thumbnail_sizes:
+        - width: 32
+          height: 32
+          method: crop
+        - width: 96
+          height: 96
+          method: crop
+        - width: 320
+          height: 240
+          method: scale
+        - width: 640
+          height: 480
+          method: scale
+        """ % locals()
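parse_thumbnail_requirements above is a pure function, so its behaviour is easy to check directly, assuming it is importable as synapse.config.repository.parse_thumbnail_requirements: GIF and PNG uploads get PNG thumbnails, JPEG uploads get JPEG thumbnails.

# Feeding a couple of the default thumbnail_sizes into the pure function
# defined in the diff above.
from synapse.config.repository import parse_thumbnail_requirements

sizes = [
    {"width": 32, "height": 32, "method": "crop"},
    {"width": 320, "height": 240, "method": "scale"},
]
reqs = parse_thumbnail_requirements(sizes)

# Each uploaded media type maps to the tuple of thumbnails to precalculate.
for media_type, thumbs in sorted(reqs.items()):
    print(media_type, [(t.width, t.height, t.method, t.media_type) for t in thumbs])
# image/gif and image/png map to "image/png" thumbnails; image/jpeg to "image/jpeg".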
synapse/config/saml2.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class SAML2Config(Config):
+    """SAML2 Configuration
+    Synapse uses pysaml2 libraries for providing SAML2 support
+
+    config_path:      Path to the sp_conf.py configuration file
+    idp_redirect_url: Identity provider URL which will redirect
+                      the user back to /login/saml2 with proper info.
+
+    sp_conf.py file is something like:
+    https://github.com/rohe/pysaml2/blob/master/example/sp-repoze/sp_conf.py.example
+
+    More information: https://pythonhosted.org/pysaml2/howto/config.html
+    """
+
+    def read_config(self, config):
+        saml2_config = config.get("saml2_config", None)
+        if saml2_config:
+            self.saml2_enabled = saml2_config.get("enabled", True)
+            self.saml2_config_path = saml2_config["config_path"]
+            self.saml2_idp_redirect_url = saml2_config["idp_redirect_url"]
+        else:
+            self.saml2_enabled = False
+            self.saml2_config_path = None
+            self.saml2_idp_redirect_url = None
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # Enable SAML2 for registration and login. Uses pysaml2
+        # config_path:      Path to the sp_conf.py configuration file
+        # idp_redirect_url: Identity provider URL which will redirect
+        #                   the user back to /login/saml2 with proper info.
+        # See pysaml2 docs for format of config.
+        #saml2_config:
+        #   enabled: true
+        #   config_path: "%s/sp_conf.py"
+        #   idp_redirect_url: "http://%s/idp"
+        """ % (config_dir_path, server_name)
synapse/config/server.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,110 +13,218 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
-from ._base import Config, ConfigError
-import syutil.crypto.signing_key
+from ._base import Config
 
 
 class ServerConfig(Config):
-    def __init__(self, args):
-        super(ServerConfig, self).__init__(args)
-        self.server_name = args.server_name
-        self.signing_key = self.read_signing_key(args.signing_key_path)
-        self.bind_port = args.bind_port
-        self.bind_host = args.bind_host
-        self.unsecure_port = args.unsecure_port
-        self.daemonize = args.daemonize
-        self.pid_file = self.abspath(args.pid_file)
-        self.webclient = True
-        self.manhole = args.manhole
-        self.no_tls = args.no_tls
 
-        if not args.content_addr:
-            host = args.server_name
+    def read_config(self, config):
+        self.server_name = config["server_name"]
+        self.pid_file = self.abspath(config.get("pid_file"))
+        self.web_client = config["web_client"]
+        self.web_client_location = config.get("web_client_location", None)
+        self.soft_file_limit = config["soft_file_limit"]
+        self.daemonize = config.get("daemonize")
+        self.print_pidfile = config.get("print_pidfile")
+        self.user_agent_suffix = config.get("user_agent_suffix")
+        self.use_frozen_dicts = config.get("use_frozen_dicts", True)
+
+        self.listeners = config.get("listeners", [])
+
+        bind_port = config.get("bind_port")
+        if bind_port:
+            self.listeners = []
+            bind_host = config.get("bind_host", "")
+            gzip_responses = config.get("gzip_responses", True)
+
+            names = ["client", "webclient"] if self.web_client else ["client"]
+
+            self.listeners.append({
+                "port": bind_port,
+                "bind_address": bind_host,
+                "tls": True,
+                "type": "http",
+                "resources": [
+                    {
+                        "names": names,
+                        "compress": gzip_responses,
+                    },
+                    {
+                        "names": ["federation"],
+                        "compress": False,
+                    }
+                ]
+            })
+
+            unsecure_port = config.get("unsecure_port", bind_port - 400)
+            if unsecure_port:
+                self.listeners.append({
+                    "port": unsecure_port,
+                    "bind_address": bind_host,
+                    "tls": False,
+                    "type": "http",
+                    "resources": [
+                        {
+                            "names": names,
+                            "compress": gzip_responses,
+                        },
+                        {
+                            "names": ["federation"],
+                            "compress": False,
+                        }
+                    ]
+                })
+
+        manhole = config.get("manhole")
+        if manhole:
+            self.listeners.append({
+                "port": manhole,
+                "bind_address": "127.0.0.1",
+                "type": "manhole",
+            })
+
+        metrics_port = config.get("metrics_port")
+        if metrics_port:
+            self.listeners.append({
+                "port": metrics_port,
+                "bind_address": config.get("metrics_bind_host", "127.0.0.1"),
+                "tls": False,
+                "type": "http",
+                "resources": [
+                    {
+                        "names": ["metrics"],
+                        "compress": False,
+                    },
+                ]
+            })
+
+        # Attempt to guess the content_addr for the v0 content repository
+        content_addr = config.get("content_addr")
+        if not content_addr:
+            for listener in self.listeners:
+                if listener["type"] == "http" and not listener.get("tls", False):
+                    unsecure_port = listener["port"]
+                    break
+            else:
+                raise RuntimeError("Could not determine 'content_addr'")
+
+            host = self.server_name
             if ':' not in host:
-                host = "%s:%d" % (host, args.unsecure_port)
+                host = "%s:%d" % (host, unsecure_port)
             else:
                 host = host.split(':')[0]
-                host = "%s:%d" % (host, args.unsecure_port)
-            args.content_addr = "http://%s" % (host,)
+                host = "%s:%d" % (host, unsecure_port)
+            content_addr = "http://%s" % (host,)
 
-        self.content_addr = args.content_addr
+        self.content_addr = content_addr
 
-    @classmethod
-    def add_arguments(cls, parser):
-        super(ServerConfig, cls).add_arguments(parser)
+    def default_config(self, server_name, **kwargs):
+        if ":" in server_name:
+            bind_port = int(server_name.split(":")[1])
+            unsecure_port = bind_port - 400
+        else:
+            bind_port = 8448
+            unsecure_port = 8008
+
+        pid_file = self.abspath("homeserver.pid")
+        return """\
+        ## Server ##
+
+        # The domain name of the server, with optional explicit port.
+        # This is used by remote servers to connect to this server,
+        # e.g. matrix.org, localhost:8080, etc.
+        # This is also the last part of your UserID.
+        server_name: "%(server_name)s"
+
+        # When running as a daemon, the file to store the pid in
+        pid_file: %(pid_file)s
+
+        # Whether to serve a web client from the HTTP/HTTPS root resource.
+        web_client: True
+
+        # Set the soft limit on the number of file descriptors synapse can use
+        # Zero is used to indicate synapse should set the soft limit to the
+        # hard limit.
+        soft_file_limit: 0
+
+        # List of ports that Synapse should listen on, their purpose and their
+        # configuration.
+        listeners:
+          # Main HTTPS listener
+          # For when matrix traffic is sent directly to synapse.
+          -
+            # The port to listen for HTTPS requests on.
+            port: %(bind_port)s
+
+            # Local interface to listen on.
+            # The empty string will cause synapse to listen on all interfaces.
+            bind_address: ''
+
+            # This is a 'http' listener, allows us to specify 'resources'.
+            type: http
+
+            tls: true
+
+            # Use the X-Forwarded-For (XFF) header as the client IP and not the
+            # actual client IP.
+            x_forwarded: false
+
+            # List of HTTP resources to serve on this listener.
+            resources:
+              -
+                # List of resources to host on this listener.
+                names:
+                  - client       # The client-server APIs, both v1 and v2
+                  - webclient    # The bundled webclient.
+
+                # Should synapse compress HTTP responses to clients that support it?
+                # This should be disabled if running synapse behind a load balancer
+                # that can do automatic compression.
+                compress: true
+
+              - names: [federation]  # Federation APIs
+                compress: false
+
+          # Unsecure HTTP listener,
+          # For when matrix traffic passes through loadbalancer that unwraps TLS.
+          - port: %(unsecure_port)s
+            tls: false
+            bind_address: ''
+            type: http
+
+            x_forwarded: false
+
+            resources:
+              - names: [client, webclient]
+                compress: true
+              - names: [federation]
+                compress: false
+
+          # Turn on the twisted ssh manhole service on localhost on the given
+          # port.
+          # - port: 9000
+          #   bind_address: 127.0.0.1
+          #   type: manhole
+        """ % locals()
+
+    def read_arguments(self, args):
+        if args.manhole is not None:
+            self.manhole = args.manhole
+        if args.daemonize is not None:
+            self.daemonize = args.daemonize
+        if args.print_pidfile is not None:
+            self.print_pidfile = args.print_pidfile
+
+    def add_arguments(self, parser):
         server_group = parser.add_argument_group("server")
-        server_group.add_argument(
-            "-H", "--server-name", default="localhost",
-            help="The domain name of the server, with optional explicit port. "
-                 "This is used by remote servers to connect to this server, "
-                 "e.g. matrix.org, localhost:8080, etc."
-        )
-        server_group.add_argument("--signing-key-path",
-                                  help="The signing key to sign messages with")
-        server_group.add_argument("-p", "--bind-port", metavar="PORT",
-                                  type=int, help="https port to listen on",
-                                  default=8448)
-        server_group.add_argument("--unsecure-port", metavar="PORT",
-                                  type=int, help="http port to listen on",
-                                  default=8008)
-        server_group.add_argument("--bind-host", default="",
-                                  help="Local interface to listen on")
         server_group.add_argument("-D", "--daemonize", action='store_true',
+                                  default=None,
                                   help="Daemonize the home server")
-        server_group.add_argument('--pid-file', default="homeserver.pid",
-                                  help="When running as a daemon, the file to"
-                                       " store the pid in")
+        server_group.add_argument("--print-pidfile", action='store_true',
+                                  default=None,
+                                  help="Print the path to the pidfile just"
+                                       " before daemonizing")
         server_group.add_argument("--manhole", metavar="PORT", dest="manhole",
                                   type=int,
                                   help="Turn on the twisted telnet manhole"
                                        " service on the given port.")
-        server_group.add_argument("--content-addr", default=None,
-                                  help="The host and scheme to use for the "
-                                       "content repository")
-        server_group.add_argument("--no-tls", action='store_true',
-                                  help="Don't bind to the https port.")
-
-    def read_signing_key(self, signing_key_path):
-        signing_keys = self.read_file(signing_key_path, "signing_key")
-        try:
-            return syutil.crypto.signing_key.read_signing_keys(
-                signing_keys.splitlines(True)
-            )
-        except Exception:
-            raise ConfigError(
-                "Error reading signing_key."
-                " Try running again with --generate-config"
-            )
-
-    @classmethod
-    def generate_config(cls, args, config_dir_path):
-        super(ServerConfig, cls).generate_config(args, config_dir_path)
-        base_key_name = os.path.join(config_dir_path, args.server_name)
-
-        args.pid_file = os.path.abspath(args.pid_file)
-
-        if not args.signing_key_path:
-            args.signing_key_path = base_key_name + ".signing.key"
-
-        if not os.path.exists(args.signing_key_path):
-            with open(args.signing_key_path, "w") as signing_key_file:
-                syutil.crypto.signing_key.write_signing_keys(
-                    signing_key_file,
(syutil.crypto.signing_key.generate_singing_key("auto"),),
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
signing_keys = cls.read_file(args.signing_key_path, "signing_key")
|
|
||||||
if len(signing_keys.split("\n")[0].split()) == 1:
|
|
||||||
# handle keys in the old format.
|
|
||||||
key = syutil.crypto.signing_key.decode_signing_key_base64(
|
|
||||||
syutil.crypto.signing_key.NACL_ED25519,
|
|
||||||
"auto",
|
|
||||||
signing_keys.split("\n")[0]
|
|
||||||
)
|
|
||||||
with open(args.signing_key_path, "w") as signing_key_file:
|
|
||||||
syutil.crypto.signing_key.write_signing_keys(
|
|
||||||
signing_key_file,
|
|
||||||
(key,),
|
|
||||||
)
|
|
||||||
|
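Aside: the refactor above replaces most argparse options with a %-substituted YAML template. A minimal sketch of how such a template round-trips, assuming only PyYAML; the helper and the cut-down template here are illustrative, not Synapse's:

# Sketch: render a %-style default config template and read it back with YAML.
# DEFAULT_TEMPLATE stands in for the string returned by default_config() above.
import yaml

DEFAULT_TEMPLATE = """\
server_name: "%(server_name)s"
pid_file: %(pid_file)s
listeners:
  - port: %(bind_port)s
    tls: true
  - port: %(unsecure_port)s
    tls: false
"""

def render_default_config(server_name, pid_file="/var/run/homeserver.pid"):
    # Mirror the port derivation in default_config(): an explicit port in
    # server_name drives bind_port, with the plain-HTTP port 400 below it.
    if ":" in server_name:
        bind_port = int(server_name.split(":")[1])
        unsecure_port = bind_port - 400
    else:
        bind_port = 8448
        unsecure_port = 8008
    return DEFAULT_TEMPLATE % locals()

config = yaml.safe_load(render_default_config("localhost:8480"))
assert config["listeners"][0]["port"] == 8480
assert config["listeners"][1]["port"] == 8080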
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,28 +23,57 @@ GENERATE_DH_PARAMS = False
 
 
 class TlsConfig(Config):
-    def __init__(self, args):
-        super(TlsConfig, self).__init__(args)
+    def read_config(self, config):
         self.tls_certificate = self.read_tls_certificate(
-            args.tls_certificate_path
+            config.get("tls_certificate_path")
         )
-        self.tls_private_key = self.read_tls_private_key(
-            args.tls_private_key_path
-        )
+        self.tls_certificate_file = config.get("tls_certificate_path")
+
+        self.no_tls = config.get("no_tls", False)
+
+        if self.no_tls:
+            self.tls_private_key = None
+        else:
+            self.tls_private_key = self.read_tls_private_key(
+                config.get("tls_private_key_path")
+            )
+
         self.tls_dh_params_path = self.check_file(
-            args.tls_dh_params_path, "tls_dh_params"
+            config.get("tls_dh_params_path"), "tls_dh_params"
         )
 
-    @classmethod
-    def add_arguments(cls, parser):
-        super(TlsConfig, cls).add_arguments(parser)
-        tls_group = parser.add_argument_group("tls")
-        tls_group.add_argument("--tls-certificate-path",
-                               help="PEM encoded X509 certificate for TLS")
-        tls_group.add_argument("--tls-private-key-path",
-                               help="PEM encoded private key for TLS")
-        tls_group.add_argument("--tls-dh-params-path",
-                               help="PEM dh parameters for ephemeral keys")
+        # This config option applies to non-federation HTTP clients
+        # (e.g. for talking to recaptcha, identity servers, and such)
+        # It should never be used in production, and is intended for
+        # use only when running tests.
+        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
+            "use_insecure_ssl_client_just_for_testing_do_not_use"
+        )
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        base_key_name = os.path.join(config_dir_path, server_name)
+
+        tls_certificate_path = base_key_name + ".tls.crt"
+        tls_private_key_path = base_key_name + ".tls.key"
+        tls_dh_params_path = base_key_name + ".tls.dh"
+
+        return """\
+# PEM encoded X509 certificate for TLS.
+# You can replace the self-signed certificate that synapse
+# autogenerates on launch with your own SSL certificate + key pair
+# if you like. Any required intermediary certificates can be
+# appended after the primary certificate in hierarchical order.
+tls_certificate_path: "%(tls_certificate_path)s"
+
+# PEM encoded private key for TLS
+tls_private_key_path: "%(tls_private_key_path)s"
+
+# PEM dh parameters for ephemeral keys
+tls_dh_params_path: "%(tls_dh_params_path)s"
+
+# Don't bind to the https port
+no_tls: False
+""" % locals()
 
     def read_tls_certificate(self, cert_path):
         cert_pem = self.read_file(cert_path, "tls_certificate")
@@ -54,22 +83,13 @@ class TlsConfig(Config):
         private_key_pem = self.read_file(private_key_path, "tls_private_key")
         return crypto.load_privatekey(crypto.FILETYPE_PEM, private_key_pem)
 
-    @classmethod
-    def generate_config(cls, args, config_dir_path):
-        super(TlsConfig, cls).generate_config(args, config_dir_path)
-        base_key_name = os.path.join(config_dir_path, args.server_name)
-
-        if args.tls_certificate_path is None:
-            args.tls_certificate_path = base_key_name + ".tls.crt"
-
-        if args.tls_private_key_path is None:
-            args.tls_private_key_path = base_key_name + ".tls.key"
-
-        if args.tls_dh_params_path is None:
-            args.tls_dh_params_path = base_key_name + ".tls.dh"
-
-        if not os.path.exists(args.tls_private_key_path):
-            with open(args.tls_private_key_path, "w") as private_key_file:
+    def generate_files(self, config):
+        tls_certificate_path = config["tls_certificate_path"]
+        tls_private_key_path = config["tls_private_key_path"]
+        tls_dh_params_path = config["tls_dh_params_path"]
+
+        if not os.path.exists(tls_private_key_path):
+            with open(tls_private_key_path, "w") as private_key_file:
                 tls_private_key = crypto.PKey()
                 tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
                 private_key_pem = crypto.dump_privatekey(
@@ -77,17 +97,17 @@ class TlsConfig(Config):
                 )
                 private_key_file.write(private_key_pem)
         else:
-            with open(args.tls_private_key_path) as private_key_file:
+            with open(tls_private_key_path) as private_key_file:
                 private_key_pem = private_key_file.read()
                 tls_private_key = crypto.load_privatekey(
                     crypto.FILETYPE_PEM, private_key_pem
                 )
 
-        if not os.path.exists(args.tls_certificate_path):
-            with open(args.tls_certificate_path, "w") as certifcate_file:
+        if not os.path.exists(tls_certificate_path):
+            with open(tls_certificate_path, "w") as certificate_file:
                 cert = crypto.X509()
                 subject = cert.get_subject()
-                subject.CN = args.server_name
+                subject.CN = config["server_name"]
 
                 cert.set_serial_number(1000)
                 cert.gmtime_adj_notBefore(0)
@@ -99,18 +119,18 @@ class TlsConfig(Config):
 
                 cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
 
-                certifcate_file.write(cert_pem)
+                certificate_file.write(cert_pem)
 
-        if not os.path.exists(args.tls_dh_params_path):
+        if not os.path.exists(tls_dh_params_path):
             if GENERATE_DH_PARAMS:
                 subprocess.check_call([
                     "openssl", "dhparam",
                     "-outform", "PEM",
-                    "-out", args.tls_dh_params_path,
+                    "-out", tls_dh_params_path,
                     "2048"
                 ])
             else:
-                with open(args.tls_dh_params_path, "w") as dh_params_file:
+                with open(tls_dh_params_path, "w") as dh_params_file:
                     dh_params_file.write(
                         "2048-bit DH parameters taken from rfc3526\n"
                         "-----BEGIN DH PARAMETERS-----\n"
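Aside: the autogenerated certificate path in generate_files() follows the usual pyOpenSSL recipe; parts of it fall outside the hunks above. A standalone sketch under the assumption of a one-year validity and sha256 digest (illustrative choices, not values taken from the diff):

# Sketch: generate an RSA key and a self-signed certificate with pyOpenSSL,
# in the same style as TlsConfig.generate_files().
from OpenSSL import crypto

def make_self_signed(server_name):
    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)

    cert = crypto.X509()
    cert.get_subject().CN = server_name
    cert.set_serial_number(1000)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(365 * 24 * 60 * 60)  # one year; an assumption here
    cert.set_issuer(cert.get_subject())           # self-signed: issuer == subject
    cert.set_pubkey(key)
    cert.sign(key, "sha256")

    return (
        crypto.dump_privatekey(crypto.FILETYPE_PEM, key),
        crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
    )

key_pem, cert_pem = make_self_signed("localhost")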
@@ -1,4 +1,4 @@
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,28 +17,21 @@ from ._base import Config
 
 class VoipConfig(Config):
 
-    def __init__(self, args):
-        super(VoipConfig, self).__init__(args)
-        self.turn_uris = args.turn_uris
-        self.turn_shared_secret = args.turn_shared_secret
-        self.turn_user_lifetime = args.turn_user_lifetime
-
-    @classmethod
-    def add_arguments(cls, parser):
-        super(VoipConfig, cls).add_arguments(parser)
-        group = parser.add_argument_group("voip")
-        group.add_argument(
-            "--turn-uris", type=str, default=None,
-            help="The public URIs of the TURN server to give to clients"
-        )
-        group.add_argument(
-            "--turn-shared-secret", type=str, default=None,
-            help=(
-                "The shared secret used to compute passwords for the TURN"
-                " server"
-            )
-        )
-        group.add_argument(
-            "--turn-user-lifetime", type=int, default=(1000 * 60 * 60),
-            help="How long generated TURN credentials last, in ms"
-        )
+    def read_config(self, config):
+        self.turn_uris = config.get("turn_uris", [])
+        self.turn_shared_secret = config["turn_shared_secret"]
+        self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
+
+    def default_config(self, **kwargs):
+        return """\
+## Turn ##
+
+# The public URIs of the TURN server to give to clients
+turn_uris: []
+
+# The shared secret used to compute passwords for the TURN server
+turn_shared_secret: "YOUR_SHARED_SECRET"
+
+# How long generated TURN credentials last
+turn_user_lifetime: "1h"
+"""
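Aside: turn_user_lifetime now passes through self.parse_duration, so the default can read "1h" rather than 3600000. A sketch of one plausible implementation of such a helper; Synapse's actual Config.parse_duration may differ in the suffixes it accepts:

# Sketch: a duration parser in the spirit of Config.parse_duration, accepting
# either an integer (milliseconds) or a string with a unit suffix.
def parse_duration(value):
    if isinstance(value, int):
        return value  # already milliseconds
    units = {"s": 1000, "m": 60 * 1000, "h": 60 * 60 * 1000, "d": 24 * 60 * 60 * 1000}
    suffix = value[-1]
    if suffix in units:
        return int(value[:-1]) * units[suffix]
    return int(value)  # bare number in a string, treated as milliseconds

assert parse_duration("1h") == 1000 * 60 * 60  # matches the old argparse default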
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -35,10 +35,13 @@ class ServerContextFactory(ssl.ContextFactory):
             _ecCurve = _OpenSSLECCurve(_defaultCurveName)
             _ecCurve.addECKeyToContext(context)
         except:
-            logger.exception("Failed to enable eliptic curve for TLS")
+            logger.exception("Failed to enable elliptic curve for TLS")
         context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
-        context.use_certificate(config.tls_certificate)
-        context.use_privatekey(config.tls_private_key)
+        context.use_certificate_chain_file(config.tls_certificate_file)
+
+        if not config.no_tls:
+            context.use_privatekey(config.tls_private_key)
 
         context.load_tmp_dh(config.tls_dh_params_path)
         context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
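Aside: the factory now serves the certificate chain file (so appended intermediaries are sent) and skips loading a private key when no_tls is set. The same hardening outside Twisted looks roughly like this, assuming only pyOpenSSL; the paths are placeholders:

# Sketch: build an OpenSSL context with the hardening shown in the diff:
# SSLv2/SSLv3 disabled, chain file served, same cipher list.
from OpenSSL import SSL

def make_context(cert_chain_path, key_path, dh_params_path):
    context = SSL.Context(SSL.SSLv23_METHOD)  # negotiates TLS; the name is historical
    context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
    context.use_certificate_chain_file(cert_chain_path)
    context.use_privatekey_file(key_path)
    context.load_tmp_dh(dh_params_path)
    context.set_cipher_list(b"!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
    return context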
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,11 +15,12 @@
 # limitations under the License.
 
 
-from synapse.events.utils import prune_event
-from syutil.jsonutil import encode_canonical_json
-from syutil.base64util import encode_base64, decode_base64
-from syutil.crypto.jsonsign import sign_json
 from synapse.api.errors import SynapseError, Codes
+from synapse.events.utils import prune_event
+
+from canonicaljson import encode_canonical_json
+from unpaddedbase64 import encode_base64, decode_base64
+from signedjson.sign import sign_json
 
 import hashlib
 import logging
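Aside: syutil is replaced here by the split-out canonicaljson, unpaddedbase64 and signedjson packages. A small sketch of hashing and signing with the new imports; the key version and server name are illustrative:

# Sketch: the same primitives via the split-out libraries.
from canonicaljson import encode_canonical_json
from unpaddedbase64 import encode_base64
from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import sign_json, verify_signed_json
import hashlib

signing_key = generate_signing_key("auto")  # "auto" is an illustrative version string

event = {"type": "m.room.message", "content": {"body": "hello"}}

# canonicaljson gives a stable byte encoding, so hashes and signatures are
# reproducible across servers.
content_hash = encode_base64(hashlib.sha256(encode_canonical_json(event)).digest())

signed = sign_json(dict(event), "example.org", signing_key)
verify_signed_json(signed, "example.org", get_verify_key(signing_key))  # raises on mismatch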
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,37 +18,51 @@ from twisted.web.http import HTTPClient
 from twisted.internet.protocol import Factory
 from twisted.internet import defer, reactor
 from synapse.http.endpoint import matrix_federation_endpoint
-from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.logcontext import (
+    preserve_context_over_fn, preserve_context_over_deferred
+)
 import simplejson as json
 import logging
 
 
 logger = logging.getLogger(__name__)
 
+KEY_API_V1 = b"/_matrix/key/v1/"
+
 
 @defer.inlineCallbacks
-def fetch_server_key(server_name, ssl_context_factory):
+def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
     """Fetch the keys for a remote server."""
 
     factory = SynapseKeyClientFactory()
+    factory.path = path
     endpoint = matrix_federation_endpoint(
         reactor, server_name, ssl_context_factory, timeout=30
    )
 
     for i in range(5):
         try:
-            with PreserveLoggingContext():
-                protocol = yield endpoint.connect(factory)
-                server_response, server_certificate = yield protocol.remote_key
-                defer.returnValue((server_response, server_certificate))
-                return
+            protocol = yield preserve_context_over_fn(
+                endpoint.connect, factory
+            )
+            server_response, server_certificate = yield preserve_context_over_deferred(
+                protocol.remote_key
+            )
+            defer.returnValue((server_response, server_certificate))
+            return
+        except SynapseKeyClientError as e:
+            logger.exception("Error getting key for %r" % (server_name,))
+            if e.status.startswith("4"):
+                # Don't retry for 4xx responses.
+                raise IOError("Cannot get key for %r" % server_name)
         except Exception as e:
             logger.exception(e)
-    raise IOError("Cannot get key for %s" % server_name)
+    raise IOError("Cannot get key for %r" % server_name)
 
 
 class SynapseKeyClientError(Exception):
     """The key wasn't retrieved from the remote server."""
+    status = None
     pass
 
 
@@ -66,17 +80,30 @@ class SynapseKeyClientProtocol(HTTPClient):
     def connectionMade(self):
         self.host = self.transport.getHost()
         logger.debug("Connected to %s", self.host)
-        self.sendCommand(b"GET", b"/_matrix/key/v1/")
+        self.sendCommand(b"GET", self.path)
         self.endHeaders()
         self.timer = reactor.callLater(
             self.timeout,
             self.on_timeout
         )
 
+    def errback(self, error):
+        if not self.remote_key.called:
+            self.remote_key.errback(error)
+
+    def callback(self, result):
+        if not self.remote_key.called:
+            self.remote_key.callback(result)
+
     def handleStatus(self, version, status, message):
         if status != b"200":
             # logger.info("Non-200 response from %s: %s %s",
             #             self.transport.getHost(), status, message)
+            error = SynapseKeyClientError(
+                "Non-200 response %r from %r" % (status, self.host)
+            )
+            error.status = status
+            self.errback(error)
             self.transport.abortConnection()
 
     def handleResponse(self, response_body_bytes):
@@ -89,15 +116,18 @@ class SynapseKeyClientProtocol(HTTPClient):
             return
 
         certificate = self.transport.getPeerCertificate()
-        self.remote_key.callback((json_response, certificate))
+        self.callback((json_response, certificate))
         self.transport.abortConnection()
         self.timer.cancel()
 
     def on_timeout(self):
         logger.debug("Timeout waiting for response from %s", self.host)
-        self.remote_key.errback(IOError("Timeout waiting for response"))
+        self.errback(IOError("Timeout waiting for response"))
         self.transport.abortConnection()
 
 
 class SynapseKeyClientFactory(Factory):
-    protocol = SynapseKeyClientProtocol
+    def protocol(self):
+        protocol = SynapseKeyClientProtocol()
+        protocol.path = self.path
+        return protocol
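Aside: turning the protocol class attribute into a method works because Twisted's Factory.buildProtocol simply calls self.protocol() and tacks on .factory, so a method can hand back a pre-configured instance per connection. A minimal sketch of the pattern (the toy protocol is illustrative):

# Sketch: per-connection configuration via a protocol() method.
from twisted.internet.protocol import Factory, Protocol

class PathAwareProtocol(Protocol):
    path = b"/"

    def connectionMade(self):
        # Each instance was configured by its factory before connecting.
        self.transport.write(b"GET %s HTTP/1.0\r\n\r\n" % (self.path,))

class PathAwareFactory(Factory):
    def __init__(self, path):
        self.path = path

    # Factory.buildProtocol() does `p = self.protocol(); p.factory = self`,
    # so a method here substitutes cleanly for the usual class attribute.
    def protocol(self):
        proto = PathAwareProtocol()
        proto.path = self.path
        return proto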
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,83 +14,603 @@
 # limitations under the License.
 
 from synapse.crypto.keyclient import fetch_server_key
+from synapse.api.errors import SynapseError, Codes
+from synapse.util.retryutils import get_retry_limiter
+from synapse.util import unwrapFirstError
+from synapse.util.async import ObservableDeferred
+from synapse.util.logcontext import (
+    preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
+    preserve_fn
+)
+
 from twisted.internet import defer
-from syutil.crypto.jsonsign import verify_signed_json, signature_ids
-from syutil.crypto.signing_key import (
+
+from signedjson.sign import (
+    verify_signed_json, signature_ids, sign_json, encode_canonical_json
+)
+from signedjson.key import (
     is_signing_algorithm_supported, decode_verify_key_bytes
 )
-from syutil.base64util import decode_base64, encode_base64
-from synapse.api.errors import SynapseError, Codes
+from unpaddedbase64 import decode_base64, encode_base64
 
 from OpenSSL import crypto
 
+from collections import namedtuple
+import urllib
+import hashlib
 import logging
 
 
 logger = logging.getLogger(__name__)
 
 
+KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids"))
+
+
 class Keyring(object):
     def __init__(self, hs):
         self.store = hs.get_datastore()
         self.clock = hs.get_clock()
+        self.client = hs.get_http_client()
+        self.config = hs.get_config()
+        self.perspective_servers = self.config.perspectives
         self.hs = hs
 
-    @defer.inlineCallbacks
+        self.key_downloads = {}
+
     def verify_json_for_server(self, server_name, json_object):
-        logger.debug("Verifying for %s", server_name)
-        key_ids = signature_ids(json_object, server_name)
-        if not key_ids:
-            raise SynapseError(
-                400,
-                "Not signed with a supported algorithm",
-                Codes.UNAUTHORIZED,
-            )
-        try:
-            verify_key = yield self.get_server_verify_key(server_name, key_ids)
-        except IOError:
-            raise SynapseError(
-                502,
-                "Error downloading keys for %s" % (server_name,),
-                Codes.UNAUTHORIZED,
-            )
-        except:
-            raise SynapseError(
-                401,
-                "No key for %s with id %s" % (server_name, key_ids),
-                Codes.UNAUTHORIZED,
-            )
-        try:
-            verify_signed_json(json_object, server_name, verify_key)
-        except:
-            raise SynapseError(
-                401,
-                "Invalid signature for server %s with key %s:%s" % (
-                    server_name, verify_key.alg, verify_key.version
-                ),
-                Codes.UNAUTHORIZED,
-            )
+        return self.verify_json_objects_for_server(
+            [(server_name, json_object)]
+        )[0]
+
+    def verify_json_objects_for_server(self, server_and_json):
+        """Bulk verfies signatures of json objects, bulk fetching keys as
+        necessary.
+
+        Args:
+            server_and_json (list): List of pairs of (server_name, json_object)
+
+        Returns:
+            list of deferreds indicating success or failure to verify each
+            json object's signature for the given server_name.
+        """
+        group_id_to_json = {}
+        group_id_to_group = {}
+        group_ids = []
+
+        next_group_id = 0
+        deferreds = {}
+
+        for server_name, json_object in server_and_json:
+            logger.debug("Verifying for %s", server_name)
+            group_id = next_group_id
+            next_group_id += 1
+            group_ids.append(group_id)
+
+            key_ids = signature_ids(json_object, server_name)
+            if not key_ids:
+                deferreds[group_id] = defer.fail(SynapseError(
+                    400,
+                    "Not signed with a supported algorithm",
+                    Codes.UNAUTHORIZED,
+                ))
+            else:
+                deferreds[group_id] = defer.Deferred()
+
+            group = KeyGroup(server_name, group_id, key_ids)
+
+            group_id_to_group[group_id] = group
+            group_id_to_json[group_id] = json_object
+
+        @defer.inlineCallbacks
+        def handle_key_deferred(group, deferred):
+            server_name = group.server_name
+            try:
+                _, _, key_id, verify_key = yield deferred
+            except IOError as e:
+                logger.warn(
+                    "Got IOError when downloading keys for %s: %s %s",
+                    server_name, type(e).__name__, str(e.message),
+                )
+                raise SynapseError(
+                    502,
+                    "Error downloading keys for %s" % (server_name,),
+                    Codes.UNAUTHORIZED,
+                )
+            except Exception as e:
+                logger.exception(
+                    "Got Exception when downloading keys for %s: %s %s",
+                    server_name, type(e).__name__, str(e.message),
+                )
+                raise SynapseError(
+                    401,
+                    "No key for %s with id %s" % (server_name, key_ids),
+                    Codes.UNAUTHORIZED,
+                )
+
+            json_object = group_id_to_json[group.group_id]
+
+            try:
+                verify_signed_json(json_object, server_name, verify_key)
+            except:
+                raise SynapseError(
+                    401,
+                    "Invalid signature for server %s with key %s:%s" % (
+                        server_name, verify_key.alg, verify_key.version
+                    ),
+                    Codes.UNAUTHORIZED,
+                )
+
+        server_to_deferred = {
+            server_name: defer.Deferred()
+            for server_name, _ in server_and_json
+        }
+
+        with PreserveLoggingContext():
+
+            # We want to wait for any previous lookups to complete before
+            # proceeding.
+            wait_on_deferred = self.wait_for_previous_lookups(
+                [server_name for server_name, _ in server_and_json],
+                server_to_deferred,
+            )
+
+            # Actually start fetching keys.
+            wait_on_deferred.addBoth(
+                lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
+            )
+
+            # When we've finished fetching all the keys for a given server_name,
+            # resolve the deferred passed to `wait_for_previous_lookups` so that
+            # any lookups waiting will proceed.
+            server_to_gids = {}
+
+            def remove_deferreds(res, server_name, group_id):
+                server_to_gids[server_name].discard(group_id)
+                if not server_to_gids[server_name]:
+                    d = server_to_deferred.pop(server_name, None)
+                    if d:
+                        d.callback(None)
+                return res
+
+            for g_id, deferred in deferreds.items():
+                server_name = group_id_to_group[g_id].server_name
+                server_to_gids.setdefault(server_name, set()).add(g_id)
+                deferred.addBoth(remove_deferreds, server_name, g_id)
+
+        # Pass those keys to handle_key_deferred so that the json object
+        # signatures can be verified
+        return [
+            preserve_context_over_fn(
+                handle_key_deferred,
+                group_id_to_group[g_id],
+                deferreds[g_id],
+            )
+            for g_id in group_ids
+        ]
 
     @defer.inlineCallbacks
-    def get_server_verify_key(self, server_name, key_ids):
+    def wait_for_previous_lookups(self, server_names, server_to_deferred):
+        """Waits for any previous key lookups for the given servers to finish.
+
+        Args:
+            server_names (list): list of server_names we want to lookup
+            server_to_deferred (dict): server_name to deferred which gets
+                resolved once we've finished looking up keys for that server
+        """
+        while True:
+            wait_on = [
+                self.key_downloads[server_name]
+                for server_name in server_names
+                if server_name in self.key_downloads
+            ]
+            if wait_on:
+                with PreserveLoggingContext():
+                    yield defer.DeferredList(wait_on)
+            else:
+                break
+
+        for server_name, deferred in server_to_deferred.items():
+            d = ObservableDeferred(preserve_context_over_deferred(deferred))
+            self.key_downloads[server_name] = d
+
+            def rm(r, server_name):
+                self.key_downloads.pop(server_name, None)
+                return r
+
+            d.addBoth(rm, server_name)
+
+    def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred):
+        """Takes a dict of KeyGroups and tries to find at least one key for
+        each group.
+        """
+
+        # These are functions that produce keys given a list of key ids
+        key_fetch_fns = (
+            self.get_keys_from_store,  # First try the local store
+            self.get_keys_from_perspectives,  # Then try via perspectives
+            self.get_keys_from_server,  # Then try directly
+        )
+
+        @defer.inlineCallbacks
+        def do_iterations():
+            merged_results = {}
+
+            missing_keys = {}
+            for group in group_id_to_group.values():
+                missing_keys.setdefault(group.server_name, set()).update(
+                    group.key_ids
+                )
+
+            for fn in key_fetch_fns:
+                results = yield fn(missing_keys.items())
+                merged_results.update(results)
+
+                # We now need to figure out which groups we have keys for
+                # and which we don't
+                missing_groups = {}
+                for group in group_id_to_group.values():
+                    for key_id in group.key_ids:
+                        if key_id in merged_results[group.server_name]:
+                            with PreserveLoggingContext():
+                                group_id_to_deferred[group.group_id].callback((
+                                    group.group_id,
+                                    group.server_name,
+                                    key_id,
+                                    merged_results[group.server_name][key_id],
+                                ))
+                            break
+                    else:
+                        missing_groups.setdefault(
+                            group.server_name, []
+                        ).append(group)
+
+                if not missing_groups:
+                    break
+
+                missing_keys = {
+                    server_name: set(
+                        key_id for group in groups for key_id in group.key_ids
+                    )
+                    for server_name, groups in missing_groups.items()
+                }
+
+            for group in missing_groups.values():
+                group_id_to_deferred[group.group_id].errback(SynapseError(
+                    401,
+                    "No key for %s with id %s" % (
+                        group.server_name, group.key_ids,
+                    ),
+                    Codes.UNAUTHORIZED,
+                ))
+
+        def on_err(err):
+            for deferred in group_id_to_deferred.values():
+                if not deferred.called:
+                    deferred.errback(err)
+
+        do_iterations().addErrback(on_err)
+
+        return group_id_to_deferred
+
+    @defer.inlineCallbacks
+    def get_keys_from_store(self, server_name_and_key_ids):
+        res = yield defer.gatherResults(
+            [
+                self.store.get_server_verify_keys(
+                    server_name, key_ids
+                ).addCallback(lambda ks, server: (server, ks), server_name)
+                for server_name, key_ids in server_name_and_key_ids
+            ],
+            consumeErrors=True,
+        ).addErrback(unwrapFirstError)
+
+        defer.returnValue(dict(res))
+
+    @defer.inlineCallbacks
+    def get_keys_from_perspectives(self, server_name_and_key_ids):
+        @defer.inlineCallbacks
+        def get_key(perspective_name, perspective_keys):
+            try:
+                result = yield self.get_server_verify_key_v2_indirect(
+                    server_name_and_key_ids, perspective_name, perspective_keys
+                )
+                defer.returnValue(result)
+            except Exception as e:
+                logger.exception(
+                    "Unable to get key from %r: %s %s",
+                    perspective_name,
+                    type(e).__name__, str(e.message),
+                )
+                defer.returnValue({})
+
+        results = yield defer.gatherResults(
+            [
+                get_key(p_name, p_keys)
+                for p_name, p_keys in self.perspective_servers.items()
+            ],
+            consumeErrors=True,
+        ).addErrback(unwrapFirstError)
+
+        union_of_keys = {}
+        for result in results:
+            for server_name, keys in result.items():
+                union_of_keys.setdefault(server_name, {}).update(keys)
+
+        defer.returnValue(union_of_keys)
+
+    @defer.inlineCallbacks
+    def get_keys_from_server(self, server_name_and_key_ids):
+        @defer.inlineCallbacks
+        def get_key(server_name, key_ids):
+            limiter = yield get_retry_limiter(
+                server_name,
+                self.clock,
+                self.store,
+            )
+            with limiter:
+                keys = None
+                try:
+                    keys = yield self.get_server_verify_key_v2_direct(
+                        server_name, key_ids
+                    )
+                except Exception as e:
+                    logger.info(
+                        "Unable to getting key %r for %r directly: %s %s",
+                        key_ids, server_name,
+                        type(e).__name__, str(e.message),
+                    )
+
+                if not keys:
+                    keys = yield self.get_server_verify_key_v1_direct(
+                        server_name, key_ids
+                    )
+
+                    keys = {server_name: keys}
+
+            defer.returnValue(keys)
+
+        results = yield defer.gatherResults(
+            [
+                get_key(server_name, key_ids)
+                for server_name, key_ids in server_name_and_key_ids
+            ],
+            consumeErrors=True,
+        ).addErrback(unwrapFirstError)
+
+        merged = {}
+        for result in results:
+            merged.update(result)
+
+        defer.returnValue({
+            server_name: keys
+            for server_name, keys in merged.items()
+            if keys
+        })
+
+    @defer.inlineCallbacks
+    def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
+                                          perspective_name,
+                                          perspective_keys):
+        # TODO(mark): Set the minimum_valid_until_ts to that needed by
+        # the events being validated or the current time if validating
+        # an incoming request.
+        query_response = yield self.client.post_json(
+            destination=perspective_name,
+            path=b"/_matrix/key/v2/query",
+            data={
+                u"server_keys": {
+                    server_name: {
+                        key_id: {
+                            u"minimum_valid_until_ts": 0
+                        } for key_id in key_ids
+                    }
+                    for server_name, key_ids in server_names_and_key_ids
+                }
+            },
+            long_retries=True,
+        )
+
+        keys = {}
+
+        responses = query_response["server_keys"]
+
+        for response in responses:
+            if (u"signatures" not in response
+                    or perspective_name not in response[u"signatures"]):
+                raise ValueError(
+                    "Key response not signed by perspective server"
+                    " %r" % (perspective_name,)
+                )
+
+            verified = False
+            for key_id in response[u"signatures"][perspective_name]:
+                if key_id in perspective_keys:
+                    verify_signed_json(
+                        response,
+                        perspective_name,
+                        perspective_keys[key_id]
+                    )
+                    verified = True
+
+            if not verified:
+                logging.info(
+                    "Response from perspective server %r not signed with a"
+                    " known key, signed with: %r, known keys: %r",
+                    perspective_name,
+                    list(response[u"signatures"][perspective_name]),
+                    list(perspective_keys)
+                )
+                raise ValueError(
+                    "Response not signed with a known key for perspective"
+                    " server %r" % (perspective_name,)
+                )
+
+            processed_response = yield self.process_v2_response(
+                perspective_name, response
+            )
+
+            for server_name, response_keys in processed_response.items():
+                keys.setdefault(server_name, {}).update(response_keys)
+
+        yield defer.gatherResults(
+            [
+                self.store_keys(
+                    server_name=server_name,
+                    from_server=perspective_name,
+                    verify_keys=response_keys,
+                )
+                for server_name, response_keys in keys.items()
+            ],
+            consumeErrors=True
+        ).addErrback(unwrapFirstError)
+
+        defer.returnValue(keys)
+
+    @defer.inlineCallbacks
+    def get_server_verify_key_v2_direct(self, server_name, key_ids):
+        keys = {}
+
+        for requested_key_id in key_ids:
+            if requested_key_id in keys:
+                continue
+
+            (response, tls_certificate) = yield fetch_server_key(
+                server_name, self.hs.tls_server_context_factory,
+                path=(b"/_matrix/key/v2/server/%s" % (
+                    urllib.quote(requested_key_id),
+                )).encode("ascii"),
+            )
+
+            if (u"signatures" not in response
+                    or server_name not in response[u"signatures"]):
+                raise ValueError("Key response not signed by remote server")
+
+            if "tls_fingerprints" not in response:
+                raise ValueError("Key response missing TLS fingerprints")
+
+            certificate_bytes = crypto.dump_certificate(
+                crypto.FILETYPE_ASN1, tls_certificate
+            )
+            sha256_fingerprint = hashlib.sha256(certificate_bytes).digest()
+            sha256_fingerprint_b64 = encode_base64(sha256_fingerprint)
+
+            response_sha256_fingerprints = set()
+            for fingerprint in response[u"tls_fingerprints"]:
+                if u"sha256" in fingerprint:
+                    response_sha256_fingerprints.add(fingerprint[u"sha256"])
+
+            if sha256_fingerprint_b64 not in response_sha256_fingerprints:
+                raise ValueError("TLS certificate not allowed by fingerprints")
+
+            response_keys = yield self.process_v2_response(
+                from_server=server_name,
+                requested_ids=[requested_key_id],
+                response_json=response,
+            )
+
+            keys.update(response_keys)
+
+        yield defer.gatherResults(
+            [
+                preserve_fn(self.store_keys)(
+                    server_name=key_server_name,
+                    from_server=server_name,
+                    verify_keys=verify_keys,
+                )
+                for key_server_name, verify_keys in keys.items()
+            ],
+            consumeErrors=True
+        ).addErrback(unwrapFirstError)
+
+        defer.returnValue(keys)
+
+    @defer.inlineCallbacks
+    def process_v2_response(self, from_server, response_json,
+                            requested_ids=[]):
+        time_now_ms = self.clock.time_msec()
+        response_keys = {}
+        verify_keys = {}
+        for key_id, key_data in response_json["verify_keys"].items():
+            if is_signing_algorithm_supported(key_id):
+                key_base64 = key_data["key"]
+                key_bytes = decode_base64(key_base64)
+                verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                verify_key.time_added = time_now_ms
+                verify_keys[key_id] = verify_key
+
+        old_verify_keys = {}
+        for key_id, key_data in response_json["old_verify_keys"].items():
+            if is_signing_algorithm_supported(key_id):
+                key_base64 = key_data["key"]
+                key_bytes = decode_base64(key_base64)
+                verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                verify_key.expired = key_data["expired_ts"]
+                verify_key.time_added = time_now_ms
+                old_verify_keys[key_id] = verify_key
+
+        results = {}
+        server_name = response_json["server_name"]
+        for key_id in response_json["signatures"].get(server_name, {}):
+            if key_id not in response_json["verify_keys"]:
+                raise ValueError(
+                    "Key response must include verification keys for all"
+                    " signatures"
+                )
+            if key_id in verify_keys:
+                verify_signed_json(
+                    response_json,
+                    server_name,
+                    verify_keys[key_id]
+                )
+
+        signed_key_json = sign_json(
+            response_json,
+            self.config.server_name,
+            self.config.signing_key[0],
+        )
+
+        signed_key_json_bytes = encode_canonical_json(signed_key_json)
+        ts_valid_until_ms = signed_key_json[u"valid_until_ts"]
+
+        updated_key_ids = set(requested_ids)
+        updated_key_ids.update(verify_keys)
+        updated_key_ids.update(old_verify_keys)
+
+        response_keys.update(verify_keys)
+        response_keys.update(old_verify_keys)
+
+        yield defer.gatherResults(
+            [
+                preserve_fn(self.store.store_server_keys_json)(
+                    server_name=server_name,
+                    key_id=key_id,
+                    from_server=server_name,
+                    ts_now_ms=time_now_ms,
+                    ts_expires_ms=ts_valid_until_ms,
+                    key_json_bytes=signed_key_json_bytes,
+                )
+                for key_id in updated_key_ids
+            ],
+            consumeErrors=True,
+        ).addErrback(unwrapFirstError)
+
+        results[server_name] = response_keys
+
+        defer.returnValue(results)
+
+    @defer.inlineCallbacks
+    def get_server_verify_key_v1_direct(self, server_name, key_ids):
         """Finds a verification key for the server with one of the key ids.
         Args:
             server_name (str): The name of the server to fetch a key for.
             keys_ids (list of str): The key_ids to check for.
         """
 
-        # Check the datastore to see if we have one cached.
-        cached = yield self.store.get_server_verify_keys(server_name, key_ids)
-
-        if cached:
-            defer.returnValue(cached[0])
-            return
-
         # Try to fetch the key from the remote server.
-        # TODO(markjh): Ratelimit requests to a given server.
 
         (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_context_factory
+            server_name, self.hs.tls_server_context_factory
        )
 
         # Check the response.
@@ -111,11 +631,16 @@ class Keyring(object):
         if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
             raise ValueError("TLS certificate doesn't match")
 
+        # Cache the result in the datastore.
+
+        time_now_ms = self.clock.time_msec()
+
         verify_keys = {}
         for key_id, key_base64 in response["verify_keys"].items():
             if is_signing_algorithm_supported(key_id):
                 key_bytes = decode_base64(key_base64)
                 verify_key = decode_verify_key_bytes(key_id, key_bytes)
+                verify_key.time_added = time_now_ms
                 verify_keys[key_id] = verify_key
 
         for key_id in response["signatures"][server_name]:
@@ -131,10 +656,6 @@ class Keyring(object):
                 verify_keys[key_id]
             )
 
-        # Cache the result in the datastore.
-
-        time_now_ms = self.clock.time_msec()
-
         yield self.store.store_server_certificate(
             server_name,
             server_name,
@@ -142,14 +663,31 @@ class Keyring(object):
             tls_certificate,
         )
 
-        for key_id, key in verify_keys.items():
-            yield self.store.store_server_verify_key(
-                server_name, server_name, time_now_ms, key
-            )
+        yield self.store_keys(
+            server_name=server_name,
+            from_server=server_name,
+            verify_keys=verify_keys,
+        )
 
-        for key_id in key_ids:
-            if key_id in verify_keys:
-                defer.returnValue(verify_keys[key_id])
-                return
+        defer.returnValue(verify_keys)
 
-        raise ValueError("No verification key found for given key ids")
+    @defer.inlineCallbacks
+    def store_keys(self, server_name, from_server, verify_keys):
+        """Store a collection of verify keys for a given server
+        Args:
+            server_name(str): The name of the server the keys are for.
+            from_server(str): The server the keys were downloaded from.
+            verify_keys(dict): A mapping of key_id to VerifyKey.
+        Returns:
+            A deferred that completes when the keys are stored.
+        """
+        # TODO(markjh): Store whether the keys have expired.
+        yield defer.gatherResults(
+            [
+                preserve_fn(self.store.store_server_verify_key)(
+                    server_name, server_name, key.time_added, key
+                )
+                for key_id, key in verify_keys.items()
+            ],
+            consumeErrors=True,
+        ).addErrback(unwrapFirstError)
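Aside: verify_json_objects_for_server returns one deferred per (server_name, json_object) pair, so a caller can collect per-item outcomes rather than failing the whole batch. A hedged sketch of such a caller; the payload shape and error handling are illustrative:

# Sketch: verifying a batch of federation payloads with the bulk API.
# `keyring` is an already-constructed Keyring.
from twisted.internet import defer

@defer.inlineCallbacks
def verify_batch(keyring, signed_events):
    # One (server_name, json_object) pair per event; using "origin" as the
    # signing server is an illustrative choice.
    server_and_json = [(e["origin"], e) for e in signed_events]
    deferreds = keyring.verify_json_objects_for_server(server_and_json)

    # DeferredList collects per-item success/failure instead of failing fast.
    results = yield defer.DeferredList(deferreds, consumeErrors=True)
    for (ok, value), event in zip(results, signed_events):
        if not ok:
            # value is a Failure wrapping the SynapseError raised above.
            print("rejecting event from %s: %s" % (event["origin"], value.value))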
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,6 +16,12 @@
 from synapse.util.frozenutils import freeze
 
 
+# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
+# bugs where we accidentally share e.g. signature dicts. However, converting
+# a dict to frozen_dicts is expensive.
+USE_FROZEN_DICTS = True
+
+
 class _EventInternalMetadata(object):
     def __init__(self, internal_metadata_dict):
         self.__dict__ = dict(internal_metadata_dict)
@@ -46,9 +52,10 @@ def _event_dict_property(key):
 
 class EventBase(object):
     def __init__(self, event_dict, signatures={}, unsigned={},
-                 internal_metadata_dict={}):
+                 internal_metadata_dict={}, rejected_reason=None):
         self.signatures = signatures
         self.unsigned = unsigned
+        self.rejected_reason = rejected_reason
 
         self._event_dict = event_dict
 
@@ -83,7 +90,7 @@ class EventBase(object):
         d = dict(self._event_dict)
         d.update({
             "signatures": self.signatures,
-            "unsigned": self.unsigned,
+            "unsigned": dict(self.unsigned),
         })
 
         return d
@@ -102,14 +109,26 @@ class EventBase(object):
             pdu_json.setdefault("unsigned", {})["age"] = int(age)
             del pdu_json["unsigned"]["age_ts"]
 
+        # This may be a frozen event
+        pdu_json["unsigned"].pop("redacted_because", None)
+
         return pdu_json
 
     def __set__(self, instance, value):
         raise AttributeError("Unrecognized attribute %s" % (instance,))
 
+    def __getitem__(self, field):
+        return self._event_dict[field]
+
+    def __contains__(self, field):
+        return field in self._event_dict
+
+    def items(self):
+        return self._event_dict.items()
+
 
 class FrozenEvent(EventBase):
-    def __init__(self, event_dict, internal_metadata_dict={}):
+    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
         event_dict = dict(event_dict)
 
         # Signatures is a dict of dicts, and this is faster than doing a
@@ -121,13 +140,17 @@ class FrozenEvent(EventBase):
 
         unsigned = dict(event_dict.pop("unsigned", {}))
 
-        frozen_dict = freeze(event_dict)
+        if USE_FROZEN_DICTS:
+            frozen_dict = freeze(event_dict)
+        else:
+            frozen_dict = event_dict
 
         super(FrozenEvent, self).__init__(
             frozen_dict,
             signatures=signatures,
             unsigned=unsigned,
             internal_metadata_dict=internal_metadata_dict,
+            rejected_reason=rejected_reason,
         )
 
     @staticmethod
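Aside: the USE_FROZEN_DICTS comment is about aliasing. With plain dicts a shallow copy shares nested mutable state, so one holder's mutation corrupts the other; freezing makes that mutation raise instead. A tiny illustration:

# Sketch: the aliasing bug that freezing guards against.
event = {"signatures": {"example.org": {"ed25519:1": "sig"}}}
copied = dict(event)  # shallow: copied["signatures"] IS event["signatures"]

copied["signatures"]["evil.org"] = {"ed25519:1": "forged"}
assert "evil.org" in event["signatures"]  # the "original" changed too

# freeze() converts the nesting to immutable mappings up front, trading the
# conversion cost for having the bad mutation raise at the offending line.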
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 
 class EventContext(object):
 
-    def __init__(self, current_state=None, auth_events=None):
+    def __init__(self, current_state=None):
         self.current_state = current_state
-        self.auth_events = auth_events
         self.state_group = None
         self.rejected = False
+        self.push_actions = []
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -66,7 +66,6 @@ def prune_event(event):
             "users_default",
             "events",
             "events_default",
-            "events_default",
             "state_default",
             "ban",
             "kick",
@@ -74,6 +73,8 @@ def prune_event(event):
         )
     elif event_type == EventTypes.Aliases:
         add_fields("aliases")
+    elif event_type == EventTypes.RoomHistoryVisibility:
+        add_fields("history_visibility")
 
     allowed_fields = {
         k: v
@@ -99,19 +100,20 @@ def format_event_raw(d):
 
 
 def format_event_for_client_v1(d):
-    d["user_id"] = d.pop("sender", None)
+    d = format_event_for_client_v2(d)
 
-    move_keys = ("age", "redacted_because", "replaces_state", "prev_content")
-    for key in move_keys:
+    sender = d.get("sender")
+    if sender is not None:
+        d["user_id"] = sender
+
+    copy_keys = (
+        "age", "redacted_because", "replaces_state", "prev_content",
+        "invite_room_state",
+    )
+    for key in copy_keys:
         if key in d["unsigned"]:
             d[key] = d["unsigned"][key]
 
-    drop_keys = (
-        "auth_events", "prev_events", "hashes", "signatures", "depth",
-        "unsigned", "origin", "prev_state"
-    )
-    for key in drop_keys:
-        d.pop(key, None)
     return d
 
 
@@ -125,10 +127,9 @@ def format_event_for_client_v2(d):
     return d
 
 
-def format_event_for_client_v2_without_event_id(d):
+def format_event_for_client_v2_without_room_id(d):
     d = format_event_for_client_v2(d)
     d.pop("room_id", None)
-    d.pop("event_id", None)
     return d
 
 
@@ -150,7 +151,8 @@ def serialize_event(e, time_now_ms, as_client_event=True,
 
     if "redacted_because" in e.unsigned:
         d["unsigned"]["redacted_because"] = serialize_event(
-            e.unsigned["redacted_because"], time_now_ms
+            e.unsigned["redacted_because"], time_now_ms,
+            event_format=event_format
         )
 
     if token_id is not None:
|
|||||||
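After this change the v1 client formatter is layered on the v2 one: `sender` is copied (not popped) into `user_id`, and the unsigned keys, now including `invite_room_state`, are copied rather than moved. A runnable sketch under the assumption that the v2 formatter simply drops federation-internal keys:

```python
def format_event_for_client_v2(d):
    # Sketch: assume the v2 formatter strips federation-only keys.
    for key in ("auth_events", "prev_events", "hashes", "signatures",
                "depth", "origin", "prev_state"):
        d.pop(key, None)
    return d


def format_event_for_client_v1(d):
    d = format_event_for_client_v2(d)

    sender = d.get("sender")
    if sender is not None:
        d["user_id"] = sender  # v1 clients expect user_id alongside sender

    copy_keys = (
        "age", "redacted_because", "replaces_state", "prev_content",
        "invite_room_state",
    )
    for key in copy_keys:
        if key in d.get("unsigned", {}):
            d[key] = d["unsigned"][key]

    return d


event = {"sender": "@alice:example.com", "unsigned": {"age": 1234}, "depth": 5}
assert format_event_for_client_v1(dict(event))["user_id"] == "@alice:example.com"
```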
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2014, 2015 OpenMarket Ltd
+# Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,15 +17,10 @@
 """

 from .replication import ReplicationLayer
-from .transport import TransportLayer
+from .transport.client import TransportLayerClient


 def initialize_http_replication(homeserver):
-    transport = TransportLayer(
-        homeserver,
-        homeserver.hostname,
-        server=homeserver.get_resource_for_federation(),
-        client=homeserver.get_http_client()
-    )
+    transport = TransportLayerClient(homeserver)

     return ReplicationLayer(homeserver, transport)
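The federation package now wires up a client-only transport that takes just the homeserver, instead of a combined transport handed both the server resource and the HTTP client. A schematic of the simplified wiring, with stub classes standing in for the real homeserver, transport, and replication layer:

```python
class FakeHomeserver(object):
    hostname = "example.com"

    def get_http_client(self):
        return object()  # stand-in for the federation HTTP client


class ReplicationLayer(object):  # stub for illustration
    def __init__(self, hs, transport):
        self.transport = transport


class TransportLayerClient(object):
    """Placeholder for synapse.federation.transport.client.TransportLayerClient."""

    def __init__(self, hs):
        self.server_name = hs.hostname
        self.client = hs.get_http_client()


def initialize_http_replication(homeserver):
    # The transport now finds its collaborators itself, rather than being
    # handed the server resource and HTTP client as separate arguments.
    transport = TransportLayerClient(homeserver)
    return ReplicationLayer(homeserver, transport)


layer = initialize_http_replication(FakeHomeserver())
```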
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 OpenMarket Ltd
+# Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,12 +18,12 @@ from twisted.internet import defer

 from synapse.events.utils import prune_event

-from syutil.jsonutil import encode_canonical_json
-
 from synapse.crypto.event_signing import check_event_content_hash

 from synapse.api.errors import SynapseError

+from synapse.util import unwrapFirstError
+
 import logging


@@ -32,7 +32,8 @@ logger = logging.getLogger(__name__)

 class FederationBase(object):
     @defer.inlineCallbacks
-    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False):
+    def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
+                                       include_none=False):
         """Takes a list of PDUs and checks the signatures and hashs of each
         one. If a PDU fails its signature check then we check if we have it in
         the database and if not then request if from the originating server of
@@ -50,84 +51,108 @@ class FederationBase(object):
         Returns:
             Deferred : A list of PDUs that have valid signatures and hashes.
         """
+        deferreds = self._check_sigs_and_hashes(pdus)

-        signed_pdus = []
+        def callback(pdu):
+            return pdu

-        @defer.inlineCallbacks
-        def do(pdu):
-            try:
-                new_pdu = yield self._check_sigs_and_hash(pdu)
-                signed_pdus.append(new_pdu)
-            except SynapseError:
-                # FIXME: We should handle signature failures more gracefully.
+        def errback(failure, pdu):
+            failure.trap(SynapseError)
+            return None

+        def try_local_db(res, pdu):
+            if not res:
                 # Check local db.
-                new_pdu = yield self.store.get_event(
+                return self.store.get_event(
                     pdu.event_id,
                     allow_rejected=True,
                     allow_none=True,
                 )
-                if new_pdu:
-                    signed_pdus.append(new_pdu)
-                    return
+            return res

-                # Check pdu.origin
-                if pdu.origin != origin:
-                    try:
-                        new_pdu = yield self.get_pdu(
-                            destinations=[pdu.origin],
-                            event_id=pdu.event_id,
-                            outlier=outlier,
-                        )
-
-                        if new_pdu:
-                            signed_pdus.append(new_pdu)
-                            return
-                    except:
-                        pass
+        def try_remote(res, pdu):
+            if not res and pdu.origin != origin:
+                return self.get_pdu(
+                    destinations=[pdu.origin],
+                    event_id=pdu.event_id,
+                    outlier=outlier,
+                    timeout=10000,
+                ).addErrback(lambda e: None)
+            return res

+        def warn(res, pdu):
+            if not res:
                 logger.warn(
                     "Failed to find copy of %s with valid signature",
                     pdu.event_id,
                 )
+            return res

-        yield defer.gatherResults(
-            [do(pdu) for pdu in pdus],
+        for pdu, deferred in zip(pdus, deferreds):
+            deferred.addCallbacks(
+                callback, errback, errbackArgs=[pdu]
+            ).addCallback(
+                try_local_db, pdu
+            ).addCallback(
+                try_remote, pdu
+            ).addCallback(
+                warn, pdu
+            )
+
+        valid_pdus = yield defer.gatherResults(
+            deferreds,
             consumeErrors=True
-        )
+        ).addErrback(unwrapFirstError)

-        defer.returnValue(signed_pdus)
+        if include_none:
+            defer.returnValue(valid_pdus)
+        else:
+            defer.returnValue([p for p in valid_pdus if p])

-    @defer.inlineCallbacks
     def _check_sigs_and_hash(self, pdu):
-        """Throws a SynapseError if the PDU does not have the correct
+        return self._check_sigs_and_hashes([pdu])[0]
+
+    def _check_sigs_and_hashes(self, pdus):
+        """Throws a SynapseError if a PDU does not have the correct
         signatures.

         Returns:
             FrozenEvent: Either the given event or it redacted if it failed the
             content hash check.
         """
-        # Check signatures are correct.
-        redacted_event = prune_event(pdu)
-        redacted_pdu_json = redacted_event.get_pdu_json()

-        try:
-            yield self.keyring.verify_json_for_server(
-                pdu.origin, redacted_pdu_json
-            )
-        except SynapseError:
+        redacted_pdus = [
+            prune_event(pdu)
+            for pdu in pdus
+        ]
+
+        deferreds = self.keyring.verify_json_objects_for_server([
+            (p.origin, p.get_pdu_json())
+            for p in redacted_pdus
+        ])
+
+        def callback(_, pdu, redacted):
+            if not check_event_content_hash(pdu):
+                logger.warn(
+                    "Event content has been tampered, redacting %s: %s",
+                    pdu.event_id, pdu.get_pdu_json()
+                )
+                return redacted
+            return pdu
+
+        def errback(failure, pdu):
+            failure.trap(SynapseError)
             logger.warn(
-                "Signature check failed for %s redacted to %s",
-                encode_canonical_json(pdu.get_pdu_json()),
-                encode_canonical_json(redacted_pdu_json),
+                "Signature check failed for %s",
+                pdu.event_id,
             )
-            raise
+            return failure

-        if not check_event_content_hash(pdu):
-            logger.warn(
-                "Event content has been tampered, redacting %s, %s",
-                pdu.event_id, encode_canonical_json(pdu.get_dict())
+        for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
+            deferred.addCallbacks(
+                callback, errback,
+                callbackArgs=[pdu, redacted],
+                errbackArgs=[pdu],
             )
-            defer.returnValue(redacted_event)

-        defer.returnValue(pdu)
+        return deferreds
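The heart of the FederationBase rewrite is replacing a per-PDU `inlineCallbacks` loop with one batch of Deferreds whose fallbacks (local DB, then a remote refetch) are chained as callbacks, so all PDUs proceed concurrently. Below is a runnable miniature of that chaining pattern, assuming Twisted is installed; the check and fetch functions are fakes, not Synapse APIs.

```python
from twisted.internet import defer


def check_signature(pdu):
    # Stand-in for the keyring check: fail PDUs marked bad.
    if pdu.get("bad"):
        return defer.fail(ValueError("signature check failed"))
    return defer.succeed(pdu)


def fetch_from_db(pdu):
    return None  # pretend we don't have a local copy


def check_all(pdus):
    deferreds = [check_signature(p) for p in pdus]

    def errback(failure, pdu):
        failure.trap(ValueError)
        return None  # swallow the failure; a later stage may recover the event

    def try_local_db(res, pdu):
        return res if res else fetch_from_db(pdu)

    for pdu, d in zip(pdus, deferreds):
        # Each Deferred carries its own recovery pipeline; nothing blocks
        # the others, and gatherResults collects them all at the end.
        d.addCallbacks(lambda r: r, errback, errbackArgs=[pdu])
        d.addCallback(try_local_db, pdu)

    return defer.gatherResults(deferreds, consumeErrors=True)


d = check_all([{"event_id": "$a"}, {"event_id": "$b", "bad": True}])
d.addCallback(lambda res: print([r for r in res if r]))  # only $a survives
```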
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 OpenMarket Ltd
+# Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,19 +17,52 @@
 from twisted.internet import defer

 from .federation_base import FederationBase
+from synapse.api.constants import Membership
 from .units import Edu

-from synapse.api.errors import CodeMessageException
+from synapse.api.errors import (
+    CodeMessageException, HttpResponseException, SynapseError,
+)
+from synapse.util import unwrapFirstError
+from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
+import synapse.metrics

+from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
+
+import copy
+import itertools
 import logging
+import random


 logger = logging.getLogger(__name__)


+# synapse.federation.federation_client is a silly name
+metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
+
+sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")
+
+sent_edus_counter = metrics.register_counter("sent_edus")
+
+sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
+
+
 class FederationClient(FederationBase):

+    def start_get_pdu_cache(self):
+        self._get_pdu_cache = ExpiringCache(
+            cache_name="get_pdu_cache",
+            clock=self._clock,
+            max_len=1000,
+            expiry_ms=120 * 1000,
+            reset_expiry_on_get=False,
+        )
+
+        self._get_pdu_cache.start()
+
     @log_function
     def send_pdu(self, pdu, destinations):
         """Informs the replication layer about a new PDU generated within the
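`start_get_pdu_cache` above configures an `ExpiringCache` with a 1000-entry cap, a two-minute expiry, and no expiry refresh on reads. The following is a tiny self-contained cache with those semantics, illustrating what the real `synapse.util.caches.expiringcache` is assumed to provide here (the real one also takes a clock and a name, and prunes in bulk).

```python
import time


class ExpiringCache(object):
    """Minimal sketch: entries expire after expiry_ms, the cache is capped
    at max_len, and reads do not refresh the expiry (matching
    reset_expiry_on_get=False above)."""

    def __init__(self, max_len=1000, expiry_ms=120 * 1000):
        self._max_len = max_len
        self._expiry_ms = expiry_ms
        self._cache = {}  # key -> (inserted_at_ms, value)

    def __setitem__(self, key, value):
        if len(self._cache) >= self._max_len:
            # Evict the oldest entry to stay under the cap.
            oldest = min(self._cache, key=lambda k: self._cache[k][0])
            del self._cache[oldest]
        self._cache[key] = (time.time() * 1000, value)

    def get(self, key, default=None):
        entry = self._cache.get(key)
        if entry is None:
            return default
        inserted_at, value = entry
        if time.time() * 1000 - inserted_at > self._expiry_ms:
            del self._cache[key]  # lazily drop stale entries on read
            return default
        return value


cache = ExpiringCache(max_len=2, expiry_ms=100)
cache["$event:a"] = {"type": "m.room.message"}
assert cache.get("$event:a") is not None
```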
@@ -47,6 +80,8 @@ class FederationClient(FederationBase):
         order = self._order
         self._order += 1

+        sent_pdus_destination_dist.inc_by(len(destinations))
+
         logger.debug("[%s] transaction_layer.enqueue_pdu... ", pdu.event_id)

         # TODO, add errback, etc.
@@ -66,6 +101,8 @@ class FederationClient(FederationBase):
             content=content,
         )

+        sent_edus_counter.inc()
+
         # TODO, add errback, etc.
         self._transaction_queue.enqueue_edu(edu)
         return defer.succeed(None)
@@ -92,10 +129,42 @@ class FederationClient(FederationBase):
             a Deferred which will eventually yield a JSON object from the
             response
         """
+        sent_queries_counter.inc(query_type)
+
         return self.transport_layer.make_query(
             destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail
         )

+    @log_function
+    def query_client_keys(self, destination, content):
+        """Query device keys for a device hosted on a remote server.
+
+        Args:
+            destination (str): Domain name of the remote homeserver
+            content (dict): The query content.
+
+        Returns:
+            a Deferred which will eventually yield a JSON object from the
+            response
+        """
+        sent_queries_counter.inc("client_device_keys")
+        return self.transport_layer.query_client_keys(destination, content)
+
+    @log_function
+    def claim_client_keys(self, destination, content):
+        """Claims one-time keys for a device hosted on a remote server.
+
+        Args:
+            destination (str): Domain name of the remote homeserver
+            content (dict): The query content.
+
+        Returns:
+            a Deferred which will eventually yield a JSON object from the
+            response
+        """
+        sent_queries_counter.inc("client_one_time_keys")
+        return self.transport_layer.claim_client_keys(destination, content)
+
     @defer.inlineCallbacks
     @log_function
     def backfill(self, dest, context, limit, extremities):
@@ -128,16 +197,17 @@ class FederationClient(FederationBase):
             for p in transaction_data["pdus"]
         ]

-        for i, pdu in enumerate(pdus):
-            pdus[i] = yield self._check_sigs_and_hash(pdu)
-            # FIXME: We should handle signature failures more gracefully.
+        # FIXME: We should handle signature failures more gracefully.
+        pdus[:] = yield defer.gatherResults(
+            self._check_sigs_and_hashes(pdus),
+            consumeErrors=True,
+        ).addErrback(unwrapFirstError)

         defer.returnValue(pdus)

     @defer.inlineCallbacks
     @log_function
-    def get_pdu(self, destinations, event_id, outlier=False):
+    def get_pdu(self, destinations, event_id, outlier=False, timeout=None):
         """Requests the PDU with given origin and ID from the remote home
         servers.

@@ -153,6 +223,8 @@ class FederationClient(FederationBase):
             outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
                 it's from an arbitary point in the context as opposed to part
                 of the current block of PDUs. Defaults to `False`
+            timeout (int): How long to try (in ms) each destination for before
+                moving to the next destination. None indicates no timeout.

         Returns:
             Deferred: Results in the requested PDU.
@@ -160,29 +232,58 @@ class FederationClient(FederationBase):

         # TODO: Rate limit the number of times we try and get the same event.

+        if self._get_pdu_cache:
+            e = self._get_pdu_cache.get(event_id)
+            if e:
+                defer.returnValue(e)
+
         pdu = None
         for destination in destinations:
             try:
-                transaction_data = yield self.transport_layer.get_event(
-                    destination, event_id
+                limiter = yield get_retry_limiter(
+                    destination,
+                    self._clock,
+                    self.store,
                 )

-                logger.debug("transaction_data %r", transaction_data)
+                with limiter:
+                    transaction_data = yield self.transport_layer.get_event(
+                        destination, event_id, timeout=timeout,
+                    )

-                pdu_list = [
-                    self.event_from_pdu_json(p, outlier=outlier)
-                    for p in transaction_data["pdus"]
-                ]
+                    logger.debug("transaction_data %r", transaction_data)

-                if pdu_list:
-                    pdu = pdu_list[0]
+                    pdu_list = [
+                        self.event_from_pdu_json(p, outlier=outlier)
+                        for p in transaction_data["pdus"]
+                    ]

-                    # Check signatures are correct.
-                    pdu = yield self._check_sigs_and_hash(pdu)
+                    if pdu_list and pdu_list[0]:
+                        pdu = pdu_list[0]

-                    break
-            except CodeMessageException:
-                raise
+                        # Check signatures are correct.
+                        pdu = yield self._check_sigs_and_hashes([pdu])[0]
+
+                        break
+
+            except SynapseError:
+                logger.info(
+                    "Failed to get PDU %s from %s because %s",
+                    event_id, destination, e,
+                )
+                continue
+            except CodeMessageException as e:
+                if 400 <= e.code < 500:
+                    raise
+
+                logger.info(
+                    "Failed to get PDU %s from %s because %s",
+                    event_id, destination, e,
+                )
+                continue
+            except NotRetryingDestination as e:
+                logger.info(e.message)
+                continue
             except Exception as e:
                 logger.info(
                     "Failed to get PDU %s from %s because %s",
@@ -190,6 +291,9 @@ class FederationClient(FederationBase):
                 )
                 continue

+        if self._get_pdu_cache is not None and pdu:
+            self._get_pdu_cache[event_id] = pdu
+
         defer.returnValue(pdu)

     @defer.inlineCallbacks
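`get_pdu` now consults the cache first, wraps each attempt in a per-destination retry limiter, and classifies failures: a `SynapseError` or 5xx response moves on to the next destination, a 4xx aborts outright (retrying elsewhere won't fix a bad request), and hosts in backoff are skipped. A plain-Python sketch of that decision ladder, with stand-in exception classes and a fake fetch function:

```python
class SynapseError(Exception):
    pass


class CodeMessageException(Exception):
    def __init__(self, code, msg):
        super().__init__(msg)
        self.code = code


class NotRetryingDestination(Exception):
    pass


def get_pdu(destinations, event_id, fetch, cache):
    """Try each destination in turn, mirroring the control flow above."""
    cached = cache.get(event_id)
    if cached:
        return cached

    pdu = None
    for destination in destinations:
        try:
            pdu = fetch(destination, event_id)
            if pdu:
                break
        except SynapseError:
            continue  # bad response from this host; try the next one
        except CodeMessageException as e:
            if 400 <= e.code < 500:
                raise  # our request is at fault; another server won't help
            continue
        except NotRetryingDestination:
            continue  # host is in backoff; skip it

    if pdu:
        cache[event_id] = pdu
    return pdu


def fake_fetch(destination, event_id):
    if destination != "b.example":
        raise SynapseError("no such event on %s" % destination)
    return {"event_id": event_id}


result = get_pdu(["a.example", "b.example"], "$e", fake_fetch, cache={})
assert result == {"event_id": "$e"}
```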
@@ -253,16 +357,55 @@ class FederationClient(FederationBase):
         defer.returnValue(signed_auth)

     @defer.inlineCallbacks
-    def make_join(self, destinations, room_id, user_id):
+    def make_membership_event(self, destinations, room_id, user_id, membership,
+                              content={},):
+        """
+        Creates an m.room.member event, with context, without participating in the room.
+
+        Does so by asking one of the already participating servers to create an
+        event with proper context.
+
+        Note that this does not append any events to any graphs.
+
+        Args:
+            destinations (str): Candidate homeservers which are probably
+                participating in the room.
+            room_id (str): The room in which the event will happen.
+            user_id (str): The user whose membership is being evented.
+            membership (str): The "membership" property of the event. Must be
+                one of "join" or "leave".
+            content (object): Any additional data to put into the content field
+                of the event.
+        Return:
+            A tuple of (origin (str), event (object)) where origin is the remote
+            homeserver which generated the event.
+        """
+        valid_memberships = {Membership.JOIN, Membership.LEAVE}
+        if membership not in valid_memberships:
+            raise RuntimeError(
+                "make_membership_event called with membership='%s', must be one of %s" %
+                (membership, ",".join(valid_memberships))
+            )
         for destination in destinations:
+            if destination == self.server_name:
+                continue
+
             try:
-                ret = yield self.transport_layer.make_join(
-                    destination, room_id, user_id
+                ret = yield self.transport_layer.make_membership_event(
+                    destination, room_id, user_id, membership
                 )

                 pdu_dict = ret["event"]

-                logger.debug("Got response to make_join: %s", pdu_dict)
+                logger.debug("Got response to make_%s: %s", membership, pdu_dict)

+                pdu_dict["content"].update(content)
+
+                # The protoevent received over the JSON wire may not have all
+                # the required fields. Lets just gloss over that because
+                # there's some we never care about
+                if "prev_state" not in pdu_dict:
+                    pdu_dict["prev_state"] = []
+
                 defer.returnValue(
                     (destination, self.event_from_pdu_json(pdu_dict))
@@ -272,8 +415,8 @@ class FederationClient(FederationBase):
                 raise
             except Exception as e:
                 logger.warn(
-                    "Failed to make_join via %s: %s",
-                    destination, e.message
+                    "Failed to make_%s via %s: %s",
+                    membership, destination, e.message
                 )

         raise RuntimeError("Failed to send to any server.")
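`make_membership_event` generalises `make_join`: it rejects memberships other than join/leave up front, merges any caller-supplied content into the protoevent, and defaults `prev_state` so a sparse wire event doesn't break later processing. A sketch of that validate-and-patch step; the `Membership` constants mirror the names in `synapse.api.constants`.

```python
class Membership(object):
    JOIN = "join"
    LEAVE = "leave"


def prepare_membership_event(pdu_dict, membership, content=None):
    """Validate and patch a protoevent, as make_membership_event now does."""
    valid_memberships = {Membership.JOIN, Membership.LEAVE}
    if membership not in valid_memberships:
        raise RuntimeError(
            "make_membership_event called with membership='%s', must be one of %s"
            % (membership, ",".join(valid_memberships))
        )

    pdu_dict["content"].update(content or {})

    # The protoevent received over the wire may omit fields we don't
    # strictly need; default them rather than failing.
    if "prev_state" not in pdu_dict:
        pdu_dict["prev_state"] = []

    return pdu_dict


proto = {"type": "m.room.member", "content": {"membership": "leave"}}
prepare_membership_event(proto, Membership.LEAVE, {"reason": "goodbye"})
assert proto["prev_state"] == [] and proto["content"]["reason"] == "goodbye"
```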
@@ -281,6 +424,9 @@ class FederationClient(FederationBase):
     @defer.inlineCallbacks
     def send_join(self, destinations, pdu):
         for destination in destinations:
+            if destination == self.server_name:
+                continue
+
             try:
                 time_now = self._clock.time_msec()
                 _, content = yield self.transport_layer.send_join(
@@ -302,13 +448,39 @@ class FederationClient(FederationBase):
                     for p in content.get("auth_chain", [])
                 ]

-                signed_state = yield self._check_sigs_and_hash_and_fetch(
-                    destination, state, outlier=True
-                )
+                pdus = {
+                    p.event_id: p
+                    for p in itertools.chain(state, auth_chain)
+                }

-                signed_auth = yield self._check_sigs_and_hash_and_fetch(
-                    destination, auth_chain, outlier=True
-                )
+                valid_pdus = yield self._check_sigs_and_hash_and_fetch(
+                    destination, pdus.values(),
+                    outlier=True,
+                )
+
+                valid_pdus_map = {
+                    p.event_id: p
+                    for p in valid_pdus
+                }
+
+                # NB: We *need* to copy to ensure that we don't have multiple
+                # references being passed on, as that causes... issues.
+                signed_state = [
+                    copy.copy(valid_pdus_map[p.event_id])
+                    for p in state
+                    if p.event_id in valid_pdus_map
+                ]
+
+                signed_auth = [
+                    valid_pdus_map[p.event_id]
+                    for p in auth_chain
+                    if p.event_id in valid_pdus_map
+                ]
+
+                # NB: We *need* to copy to ensure that we don't have multiple
+                # references being passed on, as that causes... issues.
+                for s in signed_state:
+                    s.internal_metadata = copy.deepcopy(s.internal_metadata)

                 auth_chain.sort(key=lambda e: e.depth)

@@ -320,7 +492,7 @@ class FederationClient(FederationBase):
             except CodeMessageException:
                 raise
             except Exception as e:
-                logger.warn(
+                logger.exception(
                     "Failed to send_join via %s: %s",
                     destination, e.message
                 )
@@ -350,6 +522,33 @@ class FederationClient(FederationBase):

         defer.returnValue(pdu)

+    @defer.inlineCallbacks
+    def send_leave(self, destinations, pdu):
+        for destination in destinations:
+            if destination == self.server_name:
+                continue
+
+            try:
+                time_now = self._clock.time_msec()
+                _, content = yield self.transport_layer.send_leave(
+                    destination=destination,
+                    room_id=pdu.room_id,
+                    event_id=pdu.event_id,
+                    content=pdu.get_pdu_json(time_now),
+                )
+
+                logger.debug("Got content: %s", content)
+                defer.returnValue(None)
+            except CodeMessageException:
+                raise
+            except Exception as e:
+                logger.exception(
+                    "Failed to send_leave via %s: %s",
+                    destination, e.message
+                )
+
+        raise RuntimeError("Failed to send to any server.")
+
     @defer.inlineCallbacks
     def query_auth(self, destination, room_id, event_id, local_auth):
         """
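`send_join` now validates the state and auth chain together in a single `_check_sigs_and_hash_and_fetch` call keyed by event ID, then rebuilds each list from the survivors, copying so the two lists don't end up sharing mutable event objects. A synchronous miniature of that partitioning, with events as plain dicts and a fake batch checker:

```python
import copy
import itertools


def partition_valid(state, auth_chain, check_batch):
    """Mirror the send_join handling: validate state and auth chain in one
    batch, then rebuild each list from the surviving events."""
    pdus = {p["event_id"]: p for p in itertools.chain(state, auth_chain)}
    valid = check_batch(list(pdus.values()))
    valid_map = {p["event_id"]: p for p in valid}

    # Copy so the two result lists don't share the same objects.
    signed_state = [
        copy.copy(valid_map[p["event_id"]])
        for p in state if p["event_id"] in valid_map
    ]
    signed_auth = [
        valid_map[p["event_id"]]
        for p in auth_chain if p["event_id"] in valid_map
    ]
    return signed_state, signed_auth


state = [{"event_id": "$s1"}, {"event_id": "$bad"}]
auth = [{"event_id": "$s1"}, {"event_id": "$a1"}]
ok_state, ok_auth = partition_valid(
    state, auth, check_batch=lambda ps: [p for p in ps if p["event_id"] != "$bad"]
)
assert [p["event_id"] for p in ok_state] == ["$s1"]
```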
@@ -390,6 +589,116 @@ class FederationClient(FederationBase):

         defer.returnValue(ret)

+    @defer.inlineCallbacks
+    def get_missing_events(self, destination, room_id, earliest_events_ids,
+                           latest_events, limit, min_depth):
+        """Tries to fetch events we are missing. This is called when we receive
+        an event without having received all of its ancestors.
+
+        Args:
+            destination (str)
+            room_id (str)
+            earliest_events_ids (list): List of event ids. Effectively the
+                events we expected to receive, but haven't. `get_missing_events`
+                should only return events that didn't happen before these.
+            latest_events (list): List of events we have received that we don't
+                have all previous events for.
+            limit (int): Maximum number of events to return.
+            min_depth (int): Minimum depth of events to return.
+        """
+        try:
+            content = yield self.transport_layer.get_missing_events(
+                destination=destination,
+                room_id=room_id,
+                earliest_events=earliest_events_ids,
+                latest_events=[e.event_id for e in latest_events],
+                limit=limit,
+                min_depth=min_depth,
+            )
+
+            events = [
+                self.event_from_pdu_json(e)
+                for e in content.get("events", [])
+            ]
+
+            signed_events = yield self._check_sigs_and_hash_and_fetch(
+                destination, events, outlier=False
+            )
+
+            have_gotten_all_from_destination = True
+        except HttpResponseException as e:
+            if not e.code == 400:
+                raise
+
+            # We are probably hitting an old server that doesn't support
+            # get_missing_events
+            signed_events = []
+            have_gotten_all_from_destination = False
+
+        if len(signed_events) >= limit:
+            defer.returnValue(signed_events)
+
+        servers = yield self.store.get_joined_hosts_for_room(room_id)
+
+        servers = set(servers)
+        servers.discard(self.server_name)
+
+        failed_to_fetch = set()
+
+        while len(signed_events) < limit:
+            # Are we missing any?
+
+            seen_events = set(earliest_events_ids)
+            seen_events.update(e.event_id for e in signed_events if e)
+
+            missing_events = {}
+            for e in itertools.chain(latest_events, signed_events):
+                if e.depth > min_depth:
+                    missing_events.update({
+                        e_id: e.depth for e_id, _ in e.prev_events
+                        if e_id not in seen_events
+                        and e_id not in failed_to_fetch
+                    })
+
+            if not missing_events:
+                break
+
+            have_seen = yield self.store.have_events(missing_events)
+
+            for k in have_seen:
+                missing_events.pop(k, None)
+
+            if not missing_events:
+                break
+
+            # Okay, we haven't gotten everything yet. Lets get them.
+            ordered_missing = sorted(missing_events.items(), key=lambda x: x[0])
+
+            if have_gotten_all_from_destination:
+                servers.discard(destination)
+
+            def random_server_list():
+                srvs = list(servers)
+                random.shuffle(srvs)
+                return srvs
+
+            deferreds = [
+                self.get_pdu(
+                    destinations=random_server_list(),
+                    event_id=e_id,
+                )
+                for e_id, depth in ordered_missing[:limit - len(signed_events)]
+            ]
+
+            res = yield defer.DeferredList(deferreds, consumeErrors=True)
+            for (result, val), (e_id, _) in zip(res, ordered_missing):
+                if result and val:
+                    signed_events.append(val)
+                else:
+                    failed_to_fetch.add(e_id)
+
+        defer.returnValue(signed_events)
+
     def event_from_pdu_json(self, pdu_json, outlier=False):
         event = FrozenEvent(
             pdu_json
@@ -398,3 +707,26 @@ class FederationClient(FederationBase):
         event.internal_metadata.outlier = outlier

         return event
+
+    @defer.inlineCallbacks
+    def forward_third_party_invite(self, destinations, room_id, event_dict):
+        for destination in destinations:
+            if destination == self.server_name:
+                continue
+
+            try:
+                yield self.transport_layer.exchange_third_party_invite(
+                    destination=destination,
+                    room_id=room_id,
+                    event_dict=event_dict,
+                )
+                defer.returnValue(None)
+            except CodeMessageException:
+                raise
+            except Exception as e:
+                logger.exception(
+                    "Failed to send_third_party_invite via %s: %s",
+                    destination, e.message
+                )
+
+        raise RuntimeError("Failed to send to any server.")
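The client-side `get_missing_events` falls back to a convergence loop: compute which `prev_events` are still unseen, drop the ones the local store already has, then fetch the rest from a shuffled list of candidate servers while remembering permanent failures. A condensed synchronous sketch of that loop, with fake store and fetch functions:

```python
import itertools
import random


def fill_in_missing(latest_events, earliest_ids, limit, min_depth,
                    have_events, fetch_pdu, servers):
    """Synchronous miniature of the gap-filling loop above."""
    signed_events = []
    failed_to_fetch = set()

    while len(signed_events) < limit:
        seen = set(earliest_ids)
        seen.update(e["event_id"] for e in signed_events)

        missing = {}
        for e in itertools.chain(latest_events, signed_events):
            if e["depth"] > min_depth:
                missing.update({
                    e_id: e["depth"] for e_id in e["prev_events"]
                    if e_id not in seen and e_id not in failed_to_fetch
                })

        for k in have_events(missing):
            missing.pop(k, None)  # already in the local store
        if not missing:
            break

        for e_id in sorted(missing)[:limit - len(signed_events)]:
            srvs = list(servers)
            random.shuffle(srvs)  # spread load across candidate servers
            pdu = fetch_pdu(srvs, e_id)
            if pdu:
                signed_events.append(pdu)
            else:
                failed_to_fetch.add(e_id)  # never re-request a dead end

    return signed_events


graph = {
    "$c": {"event_id": "$c", "depth": 3, "prev_events": ["$b"]},
    "$b": {"event_id": "$b", "depth": 2, "prev_events": ["$a"]},
}
got = fill_in_missing(
    [graph["$c"]], ["$a"], limit=10, min_depth=0,
    have_events=lambda m: [], fetch_pdu=lambda srvs, e_id: graph.get(e_id),
    servers=["a.example", "b.example"],
)
assert [e["event_id"] for e in got] == ["$b"]
```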
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2015 OpenMarket Ltd
+# Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,18 +20,28 @@ from .federation_base import FederationBase
 from .units import Transaction, Edu

 from synapse.util.logutils import log_function
-from synapse.util.logcontext import PreserveLoggingContext
 from synapse.events import FrozenEvent
+import synapse.metrics

 from synapse.api.errors import FederationError, SynapseError

 from synapse.crypto.event_signing import compute_event_signature

+import simplejson as json
 import logging


 logger = logging.getLogger(__name__)

+# synapse.federation.federation_server is a silly name
+metrics = synapse.metrics.get_metrics_for("synapse.federation.server")
+
+received_pdus_counter = metrics.register_counter("received_pdus")
+
+received_edus_counter = metrics.register_counter("received_edus")
+
+received_queries_counter = metrics.register_counter("received_queries", labels=["type"])
+

 class FederationServer(FederationBase):
     def set_handler(self, handler):
@@ -84,6 +94,8 @@ class FederationServer(FederationBase):
     def on_incoming_transaction(self, transaction_data):
         transaction = Transaction(**transaction_data)

+        received_pdus_counter.inc_by(len(transaction.pdus))
+
         for p in transaction.pdus:
             if "unsigned" in p:
                 unsigned = p["unsigned"]
@@ -111,30 +123,37 @@ class FederationServer(FederationBase):

         logger.debug("[%s] Transaction is new", transaction.transaction_id)

-        with PreserveLoggingContext():
-            dl = []
-            for pdu in pdu_list:
-                dl.append(self._handle_new_pdu(transaction.origin, pdu))
+        results = []

-            if hasattr(transaction, "edus"):
-                for edu in [Edu(**x) for x in transaction.edus]:
-                    self.received_edu(
-                        transaction.origin,
-                        edu.edu_type,
-                        edu.content
-                    )
+        for pdu in pdu_list:
+            try:
+                yield self._handle_new_pdu(transaction.origin, pdu)
+                results.append({})
+            except FederationError as e:
+                self.send_failure(e, transaction.origin)
+                results.append({"error": str(e)})
+            except Exception as e:
+                results.append({"error": str(e)})
+                logger.exception("Failed to handle PDU")

-            results = yield defer.DeferredList(dl)
+        if hasattr(transaction, "edus"):
+            for edu in [Edu(**x) for x in transaction.edus]:
+                self.received_edu(
+                    transaction.origin,
+                    edu.edu_type,
+                    edu.content
+                )

-        ret = []
-        for r in results:
-            if r[0]:
-                ret.append({})
-            else:
-                logger.exception(r[1])
-                ret.append({"error": str(r[1])})
+        for failure in getattr(transaction, "pdu_failures", []):
+            logger.info("Got failure %r", failure)

-        logger.debug("Returning: %s", str(ret))
+        logger.debug("Returning: %s", str(results))

+        response = {
+            "pdus": dict(zip(
+                (p.event_id for p in pdu_list), results
+            )),
+        }
+
         yield self.transaction_actions.set_response(
             transaction,
@@ -143,6 +162,8 @@ class FederationServer(FederationBase):
         defer.returnValue((200, response))

     def received_edu(self, origin, edu_type, content):
+        received_edus_counter.inc()
+
         if edu_type in self.edu_handlers:
             self.edu_handlers[edu_type](origin, content)
         else:
@@ -194,6 +215,8 @@ class FederationServer(FederationBase):

     @defer.inlineCallbacks
     def on_query_request(self, query_type, args):
+        received_queries_counter.inc(query_type)
+
         if query_type in self.query_handlers:
             response = yield self.query_handlers[query_type](args)
             defer.returnValue((200, response))
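`on_incoming_transaction` now processes a transaction's PDUs sequentially, recording one result per event ({} on success, {"error": ...} on failure) and keying the response by event ID, instead of firing a `DeferredList` under `PreserveLoggingContext`. A sketch of that response shaping, with a stand-in handler:

```python
def process_transaction_pdus(pdu_list, handle_pdu):
    """Sequentially handle PDUs, capturing one result per event."""
    results = []
    for pdu in pdu_list:
        try:
            handle_pdu(pdu)
            results.append({})
        except Exception as e:
            # A single bad event must not fail the whole transaction.
            results.append({"error": str(e)})

    return {"pdus": dict(zip((p["event_id"] for p in pdu_list), results))}


def handle(pdu):
    if pdu.get("bad"):
        raise ValueError("unrecognised event")


response = process_transaction_pdus(
    [{"event_id": "$ok"}, {"event_id": "$bad", "bad": True}], handle
)
assert response["pdus"]["$bad"] == {"error": "unrecognised event"}
```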
@@ -229,6 +252,20 @@ class FederationServer(FederationBase):
             ],
         }))

+    @defer.inlineCallbacks
+    def on_make_leave_request(self, room_id, user_id):
+        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
+        time_now = self._clock.time_msec()
+        defer.returnValue({"event": pdu.get_pdu_json(time_now)})
+
+    @defer.inlineCallbacks
+    def on_send_leave_request(self, origin, content):
+        logger.debug("on_send_leave_request: content: %s", content)
+        pdu = self.event_from_pdu_json(content)
+        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
+        yield self.handler.on_send_leave_request(origin, pdu)
+        defer.returnValue((200, {}))
+
     @defer.inlineCallbacks
     def on_event_auth(self, origin, room_id, event_id):
         time_now = self._clock.time_msec()
@@ -288,6 +325,62 @@ class FederationServer(FederationBase):
             (200, send_content)
         )

+    @defer.inlineCallbacks
+    @log_function
+    def on_query_client_keys(self, origin, content):
+        query = []
+        for user_id, device_ids in content.get("device_keys", {}).items():
+            if not device_ids:
+                query.append((user_id, None))
+            else:
+                for device_id in device_ids:
+                    query.append((user_id, device_id))
+
+        results = yield self.store.get_e2e_device_keys(query)
+
+        json_result = {}
+        for user_id, device_keys in results.items():
+            for device_id, json_bytes in device_keys.items():
+                json_result.setdefault(user_id, {})[device_id] = json.loads(
+                    json_bytes
+                )
+
+        defer.returnValue({"device_keys": json_result})
+
+    @defer.inlineCallbacks
+    @log_function
+    def on_claim_client_keys(self, origin, content):
+        query = []
+        for user_id, device_keys in content.get("one_time_keys", {}).items():
+            for device_id, algorithm in device_keys.items():
+                query.append((user_id, device_id, algorithm))
+
+        results = yield self.store.claim_e2e_one_time_keys(query)
+
+        json_result = {}
+        for user_id, device_keys in results.items():
+            for device_id, keys in device_keys.items():
+                for key_id, json_bytes in keys.items():
+                    json_result.setdefault(user_id, {})[device_id] = {
+                        key_id: json.loads(json_bytes)
+                    }
+
+        defer.returnValue({"one_time_keys": json_result})
+
+    @defer.inlineCallbacks
+    @log_function
+    def on_get_missing_events(self, origin, room_id, earliest_events,
+                              latest_events, limit, min_depth):
+        missing_events = yield self.handler.on_get_missing_events(
+            origin, room_id, earliest_events, latest_events, limit, min_depth
+        )
+
+        time_now = self._clock.time_msec()
+
+        defer.returnValue({
+            "events": [ev.get_pdu_json(time_now) for ev in missing_events],
+        })
+
     @log_function
     def _get_persisted_pdu(self, origin, event_id, do_auth=True):
         """ Get a PDU from the database with given origin and id.
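Both new key endpoints flatten the incoming query into tuples for the store and then rebuild a nested JSON response; the stored values are assumed to be JSON-serialised strings, hence the `json.loads`. A self-contained sketch of the claim side, with a fake store:

```python
import json


def shape_claim_response(content, claim_from_store):
    """Mirror on_claim_client_keys: flatten the query, nest the result."""
    query = []
    for user_id, device_keys in content.get("one_time_keys", {}).items():
        for device_id, algorithm in device_keys.items():
            query.append((user_id, device_id, algorithm))

    results = claim_from_store(query)  # {user: {device: {key_id: json_bytes}}}

    json_result = {}
    for user_id, device_keys in results.items():
        for device_id, keys in device_keys.items():
            for key_id, json_bytes in keys.items():
                json_result.setdefault(user_id, {})[device_id] = {
                    key_id: json.loads(json_bytes)
                }
    return {"one_time_keys": json_result}


def fake_store(query):
    user_id, device_id, algorithm = query[0]
    key_id = "%s:AAAAAQ" % algorithm
    return {user_id: {device_id: {key_id: json.dumps({"key": "base64+key"})}}}


resp = shape_claim_response(
    {"one_time_keys": {"@alice:example.com": {"DEVICE": "signed_curve25519"}}},
    fake_store,
)
assert "DEVICE" in resp["one_time_keys"]["@alice:example.com"]
```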
@@ -314,7 +407,7 @@ class FederationServer(FederationBase):

     @defer.inlineCallbacks
     @log_function
-    def _handle_new_pdu(self, origin, pdu, max_recursion=10):
+    def _handle_new_pdu(self, origin, pdu, get_missing=True):
         # We reprocess pdus when we have seen them only as outliers
         existing = yield self._get_persisted_pdu(
             origin, pdu.event_id, do_auth=False
@@ -331,7 +424,6 @@ class FederationServer(FederationBase):
         )
         if already_seen:
             logger.debug("Already seen pdu %s", pdu.event_id)
-            defer.returnValue({})
             return

         # Check signature.
@@ -367,42 +459,54 @@ class FederationServer(FederationBase):
             pdu.room_id, min_depth
         )

-        if min_depth and pdu.depth > min_depth and max_recursion > 0:
-            for event_id, hashes in pdu.prev_events:
-                if event_id not in have_seen:
-                    logger.debug(
-                        "_handle_new_pdu requesting pdu %s",
-                        event_id
+        prevs = {e_id for e_id, _ in pdu.prev_events}
+        seen = set(have_seen.keys())
+
+        if min_depth and pdu.depth < min_depth:
+            # This is so that we don't notify the user about this
+            # message, to work around the fact that some events will
+            # reference really really old events we really don't want to
+            # send to the clients.
+            pdu.internal_metadata.outlier = True
+        elif min_depth and pdu.depth > min_depth:
+            if get_missing and prevs - seen:
+                latest = yield self.store.get_latest_event_ids_in_room(
+                    pdu.room_id
+                )
+
+                # We add the prev events that we have seen to the latest
+                # list to ensure the remote server doesn't give them to us
+                latest = set(latest)
+                latest |= seen
+
+                missing_events = yield self.get_missing_events(
+                    origin,
+                    pdu.room_id,
+                    earliest_events_ids=list(latest),
+                    latest_events=[pdu],
+                    limit=10,
+                    min_depth=min_depth,
+                )
+
+                # We want to sort these by depth so we process them and
+                # tell clients about them in order.
+                missing_events.sort(key=lambda x: x.depth)
+
+                for e in missing_events:
+                    yield self._handle_new_pdu(
+                        origin,
+                        e,
+                        get_missing=False
                     )

-                    try:
-                        new_pdu = yield self.federation_client.get_pdu(
-                            [origin, pdu.origin],
-                            event_id=event_id,
-                        )
-
-                        if new_pdu:
-                            yield self._handle_new_pdu(
-                                origin,
-                                new_pdu,
-                                max_recursion=max_recursion-1
-                            )
-
-                            logger.debug("Processed pdu %s", event_id)
-                        else:
-                            logger.warn("Failed to get PDU %s", event_id)
-                            fetch_state = True
-                    except:
-                        # TODO(erikj): Do some more intelligent retries.
-                        logger.exception("Failed to get PDU")
-                        fetch_state = True
-            else:
-                prevs = {e_id for e_id, _ in pdu.prev_events}
-                seen = set(have_seen.keys())
-                if prevs - seen:
-                    fetch_state = True
-        else:
-            fetch_state = True
+                have_seen = yield self.store.have_events(
+                    [ev for ev, _ in pdu.prev_events]
+                )
+
+        prevs = {e_id for e_id, _ in pdu.prev_events}
+        seen = set(have_seen.keys())
+        if prevs - seen:
+            fetch_state = True

         if fetch_state:
             # We need to get the state at this event, since we haven't
@@ -418,7 +522,7 @@ class FederationServer(FederationBase):
         except:
             logger.warn("Failed to get state for event: %s", pdu.event_id)

-        ret = yield self.handler.on_receive_pdu(
+        yield self.handler.on_receive_pdu(
             origin,
             pdu,
             backfilled=False,
@@ -426,8 +530,6 @@ class FederationServer(FederationBase):
             auth_chain=auth_chain,
         )

-        defer.returnValue(ret)
-
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name

@@ -439,3 +541,15 @@ class FederationServer(FederationBase):
         event.internal_metadata.outlier = outlier

         return event
+
+    @defer.inlineCallbacks
+    def exchange_third_party_invite(self, invite):
+        ret = yield self.handler.exchange_third_party_invite(invite)
+        defer.returnValue(ret)
+
+    @defer.inlineCallbacks
+    def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
+        ret = yield self.handler.on_exchange_third_party_invite_request(
+            origin, room_id, event_dict
+        )
+        defer.returnValue(ret)
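`_handle_new_pdu` swaps bounded recursion (`max_recursion=10`) for a single batched pass: events below the room's `min_depth` are marked outliers so clients aren't notified about ancient history; deeper events with unseen `prev_events` trigger one `get_missing_events` call whose results are processed with `get_missing=False` to prevent loops; and state is fetched only if gaps remain afterwards. A compact sketch of just those decision rules:

```python
def plan_new_pdu(depth, min_depth, unseen_prevs, get_missing):
    """Decide how to treat a newly received PDU, per the hunk above:
    too-shallow events become outliers; deep events with unseen prev_events
    trigger one batched get_missing_events pass (never recursive)."""
    if min_depth and depth < min_depth:
        return "mark_outlier"
    if min_depth and depth > min_depth and get_missing and unseen_prevs:
        return "fetch_missing_then_recheck"
    return "recheck_prevs"  # fetch state iff any prev_events are still unseen


assert plan_new_pdu(1, 5, {"$old"}, True) == "mark_outlier"
assert plan_new_pdu(9, 5, {"$gap"}, True) == "fetch_missing_then_recheck"
assert plan_new_pdu(9, 5, set(), True) == "recheck_prevs"
```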
Some files were not shown because too many files have changed in this diff.