Mirror of https://github.com/element-hq/synapse.git, synced 2025-12-05 01:10:13 +00:00.

Compare commits: 44 commits between anoa/user_… and erik-hacke…
Commits included (SHA1): f56db48f67, dd7f1b10ce, 661a4d4603, a401750ef4, 72c673407f, 488a2696b6, 052df50e49, 3633ee8c63, 912c456d60, d05575a574, 2fb6d46042, c7fa6672d9, 36c43eef17, 411c437fc6, 6783bc0ded, 295973e3a9, 3b78452c3a, 1f732e05d0, 9ed48cecbc, cc9d4a4d3e, 889803a78e, 22245fb8a7, 78c5eca141, 55bef59cc7, ad683cd203, d66afef01e, b07a33f024, 74539aa25b, 8f25cd6627, dc1299a4b0, 2c72d66cda, cde90a89ed, 5aec53ad95, d10d19f0ad, daec1d77be, 61885f7849, ba30d489d9, 5e2d0650df, 841bcbcafa, 08a6b88e3d, aece8e73b1, 328bd35e00, 7de9a28b8e, 2a9c3aea89
@@ -36,6 +36,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
 from synapse.rest.client.v2_alpha._base import client_v2_patterns
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
@@ -49,6 +50,35 @@ from twisted.web.resource import NoResource
 logger = logging.getLogger("synapse.app.frontend_proxy")


+class PresenceStatusStubServlet(ClientV1RestServlet):
+    PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
+
+    def __init__(self, hs):
+        super(PresenceStatusStubServlet, self).__init__(hs)
+        self.http_client = hs.get_simple_http_client()
+        self.auth = hs.get_auth()
+        self.main_uri = hs.config.worker_main_http_uri
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, user_id):
+        # Pass through the auth headers, if any, in case the access token
+        # is there.
+        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
+        headers = {
+            "Authorization": auth_headers,
+        }
+        result = yield self.http_client.get_json(
+            self.main_uri + request.uri,
+            headers=headers,
+        )
+        defer.returnValue((200, result))
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request, user_id):
+        yield self.auth.get_user_by_req(request)
+        defer.returnValue((200, {}))
+
+
 class KeyUploadServlet(RestServlet):
     PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

@@ -135,6 +165,7 @@ class FrontendProxyServer(HomeServer):
        elif name == "client":
            resource = JsonResource(self, canonical_json=False)
            KeyUploadServlet(self).register(resource)
+           PresenceStatusStubServlet(self).register(resource)
            resources.update({
                "/_matrix/client/r0": resource,
                "/_matrix/client/unstable": resource,
@@ -113,6 +113,7 @@ class SynchrotronPresence(object):
         logger.info("Presence process_id is %r", self.process_id)

     def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+        return
         self.hs.get_tcp_replication().send_user_sync(user_id, is_syncing, last_sync_ms)

     def mark_as_coming_online(self, user_id):
@@ -210,6 +211,8 @@
         yield self.notify_from_replication(states, stream_id)

     def get_currently_syncing_users(self):
+        # presence is disabled on matrix.org, so we return the empty set
+        return set()
         return [
             user_id for user_id, count in self.user_to_num_current_syncs.iteritems()
             if count > 0
@@ -108,7 +108,7 @@ def stop(pidfile, app):


 Worker = collections.namedtuple("Worker", [
-    "app", "configfile", "pidfile", "cache_factor"
+    "app", "configfile", "pidfile", "cache_factor", "cache_factors",
 ])


@@ -171,6 +171,10 @@ def main():
    if cache_factor:
        os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)

+   cache_factors = config.get("synctl_cache_factors", {})
+   for cache_name, factor in cache_factors.iteritems():
+       os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
+
    worker_configfiles = []
    if options.worker:
        start_stop_synapse = False
@@ -211,6 +215,10 @@ def main():
            or pidfile
        )
        worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
+       worker_cache_factors = (
+           worker_config.get("synctl_cache_factors")
+           or cache_factors
+       )
        daemonize = worker_config.get("daemonize") or config.get("daemonize")
        assert daemonize, "Main process must have daemonize set to true"

@@ -226,8 +234,10 @@ def main():
            assert worker_daemonize, "In config %r: expected '%s' to be True" % (
                worker_configfile, "worker_daemonize")
            worker_cache_factor = worker_config.get("synctl_cache_factor")
+           worker_cache_factors = worker_config.get("synctl_cache_factors", {})
            workers.append(Worker(
                worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
+               worker_cache_factors,
            ))

    action = options.action
@@ -262,15 +272,19 @@ def main():
            start(configfile)

        for worker in workers:
+           env = os.environ.copy()
+
            if worker.cache_factor:
                os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)

+           for cache_name, factor in worker.cache_factors.iteritems():
+               os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
+
            start_worker(worker.app, configfile, worker.configfile)

-           if cache_factor:
-               os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
-           else:
-               os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
+           # Reset env back to the original
+           os.environ.clear()
+           os.environ.update(env)


if __name__ == "__main__":
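For context, the per-cache factors flow from the synctl config into environment variables that the Synapse process later reads back. A minimal standalone sketch of that mapping, assuming only what the hunks above show (the `synctl_cache_factors` key and the `SYNAPSE_CACHE_FACTOR_*` naming come from the diff; the example cache names and values are made up):

```python
import os

# Hypothetical excerpt of a homeserver/worker config, as a plain dict.
config = {
    "synctl_cache_factor": 2.0,          # global factor, as before this change
    "synctl_cache_factors": {            # per-cache overrides, added by this change
        "stateGroupCache": 5.0,
        "get_users_in_room": 1.0,
    },
}

# Global factor, exactly as synctl already did.
if config.get("synctl_cache_factor"):
    os.environ["SYNAPSE_CACHE_FACTOR"] = str(config["synctl_cache_factor"])

# Per-cache factors, as added in the diff: one env var per cache name.
for cache_name, factor in config.get("synctl_cache_factors", {}).items():
    os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)

print(os.environ["SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE"])  # "5.0"
```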
@@ -295,6 +295,7 @@ class TransactionQueue(object):
         Args:
             states (list(UserPresenceState))
         """
+        return

         # First we queue up the new presence by user ID, so multiple presence
         # updates in quick successtion are correctly handled
@@ -372,6 +372,7 @@ class InitialSyncHandler(BaseHandler):

         @defer.inlineCallbacks
         def get_presence():
+            defer.returnValue([])
             states = yield presence_handler.get_states(
                 [m.user_id for m in room_members],
                 as_event=True,
@@ -384,7 +384,7 @@ class MessageHandler(BaseHandler):
         # If this is an AS, double check that they are allowed to see the members.
         # This can either be because the AS user is in the room or becuase there
         # is a user in the room that the AS is "interested in"
-        if requester.app_service and user_id not in users_with_profile:
+        if False and requester.app_service and user_id not in users_with_profile:
             for uid in users_with_profile:
                 if requester.app_service.is_interested_in_user(uid):
                     break
@@ -373,6 +373,7 @@ class PresenceHandler(object):
        """We've seen the user do something that indicates they're interacting
        with the app.
        """
+       return
        user_id = user.to_string()

        bump_active_time_counter.inc()
@@ -402,6 +403,7 @@
        Useful for streams that are not associated with an actual
        client that is being used by a user.
        """
+       affect_presence = False
        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1
@@ -444,6 +446,8 @@
        Returns:
            set(str): A set of user_id strings.
        """
+       # presence is disabled on matrix.org, so we return the empty set
+       return set()
        syncing_user_ids = {
            user_id for user_id, count in self.user_to_num_current_syncs.items()
            if count
@@ -463,6 +467,7 @@
            syncing_user_ids(set(str)): The set of user_ids that are
                currently syncing on that server.
        """
+       return

        # Grab the previous list of user_ids that were syncing on that process
        prev_syncing_user_ids = (
@@ -44,7 +44,7 @@ EMTPY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
 class RoomListHandler(BaseHandler):
     def __init__(self, hs):
         super(RoomListHandler, self).__init__(hs)
-        self.response_cache = ResponseCache(hs, "room_list")
+        self.response_cache = ResponseCache(hs, "room_list", timeout_ms=10 * 60 * 1000)
         self.remote_response_cache = ResponseCache(hs, "remote_room_list",
                                                    timeout_ms=30 * 1000)

@@ -28,7 +28,7 @@ from synapse.api.constants import (
 )
 from synapse.api.errors import AuthError, SynapseError, Codes
 from synapse.types import UserID, RoomID
-from synapse.util.async import Linearizer
+from synapse.util.async import Linearizer, Limiter
 from synapse.util.distributor import user_left_room, user_joined_room


@@ -60,6 +60,7 @@ class RoomMemberHandler(object):
         self.event_creation_hander = hs.get_event_creation_handler()

         self.member_linearizer = Linearizer(name="member")
+        self.member_limiter = Limiter(20)

         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
@@ -232,18 +233,23 @@ class RoomMemberHandler(object):
    ):
        key = (room_id,)

-       with (yield self.member_linearizer.queue(key)):
-           result = yield self._update_membership(
-               requester,
-               target,
-               room_id,
-               action,
-               txn_id=txn_id,
-               remote_room_hosts=remote_room_hosts,
-               third_party_signed=third_party_signed,
-               ratelimit=ratelimit,
-               content=content,
-           )
+       as_id = object()
+       if requester.app_service:
+           as_id = requester.app_service.id
+
+       with (yield self.member_limiter.queue(as_id)):
+           with (yield self.member_linearizer.queue(key)):
+               result = yield self._update_membership(
+                   requester,
+                   target,
+                   room_id,
+                   action,
+                   txn_id=txn_id,
+                   remote_room_hosts=remote_room_hosts,
+                   third_party_signed=third_party_signed,
+                   ratelimit=ratelimit,
+                   content=content,
+               )

        defer.returnValue(result)
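The `Limiter(20)` keyed on the application-service ID caps how many membership updates a single AS can have in flight at once, while the existing `Linearizer` keyed on room ID still serialises updates within a room. As a rough illustration of that idea only (this is not Synapse's `Limiter`; the class below is a hypothetical, thread-based stand-in):

```python
import threading
from contextlib import contextmanager


class KeyedLimiter(object):
    """Allow at most `max_count` concurrent holders per key (hypothetical stand-in)."""

    def __init__(self, max_count):
        self._max_count = max_count
        self._lock = threading.Lock()
        self._semaphores = {}

    @contextmanager
    def queue(self, key):
        # One semaphore per key, created lazily.
        with self._lock:
            sem = self._semaphores.setdefault(
                key, threading.BoundedSemaphore(self._max_count)
            )
        sem.acquire()  # blocks once `max_count` holders exist for this key
        try:
            yield
        finally:
            sem.release()


limiter = KeyedLimiter(20)
with limiter.queue("some-appservice-id"):
    pass  # at most 20 concurrent membership updates per application service
```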
@@ -30,6 +30,7 @@ import itertools

 logger = logging.getLogger(__name__)

+SYNC_RESPONSE_CACHE_MS = 2 * 60 * 1000

 SyncConfig = collections.namedtuple("SyncConfig", [
     "user",
@@ -169,7 +170,10 @@ class SyncHandler(object):
         self.presence_handler = hs.get_presence_handler()
         self.event_sources = hs.get_event_sources()
         self.clock = hs.get_clock()
-        self.response_cache = ResponseCache(hs, "sync")
+        self.response_cache = ResponseCache(
+            hs, "sync",
+            timeout_ms=SYNC_RESPONSE_CACHE_MS,
+        )
         self.state = hs.get_state_handler()

     def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
@@ -599,7 +603,7 @@ class SyncHandler(object):
            since_token is None and
            sync_config.filter_collection.blocks_all_presence()
        )
-       if not block_all_presence_data:
+       if False and not block_all_presence_data:
            yield self._generate_sync_entry_for_presence(
                sync_result_builder, newly_joined_rooms, newly_joined_users
            )
@@ -42,6 +42,8 @@ class SlavedClientIpStore(BaseSlavedStore):
        if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
            return

+       self.client_ip_last_seen.prefill(key, now)
+
        self.hs.get_tcp_replication().send_user_ip(
            user_id, access_token, ip, user_agent, device_id, now
        )
@@ -33,7 +33,7 @@ import logging
 logger = logging.getLogger(__name__)


-MAX_EVENTS_BEHIND = 10000
+MAX_EVENTS_BEHIND = 500000


 EventStreamRow = namedtuple("EventStreamRow", (
@@ -296,17 +296,6 @@ class ShutdownRoomRestServlet(ClientV1RestServlet):
        )
        new_room_id = info["room_id"]

-       yield self.event_creation_handler.create_and_send_nonmember_event(
-           room_creator_requester,
-           {
-               "type": "m.room.message",
-               "content": {"body": message, "msgtype": "m.text"},
-               "room_id": new_room_id,
-               "sender": new_room_user_id,
-           },
-           ratelimit=False,
-       )
-
        requester_user_id = requester.user.to_string()

        logger.info("Shutting down room %r", room_id)
@@ -344,6 +333,17 @@

            kicked_users.append(user_id)

+       yield self.event_creation_handler.create_and_send_nonmember_event(
+           room_creator_requester,
+           {
+               "type": "m.room.message",
+               "content": {"body": message, "msgtype": "m.text"},
+               "room_id": new_room_id,
+               "sender": new_room_user_id,
+           },
+           ratelimit=False,
+       )
+
        aliases_for_room = yield self.store.get_aliases_for_room(room_id)

        yield self.store.update_aliases_for_room(
@@ -81,7 +81,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet):
        except Exception:
            raise SynapseError(400, "Unable to parse state")

-       yield self.presence_handler.set_state(user, state)
+       # yield self.presence_handler.set_state(user, state)

        defer.returnValue((200, {}))
@@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)
 # Number of msec of granularity to store the user IP 'last seen' time. Smaller
 # times give more inserts into the database even for readonly API hits
 # 120 seconds == 2 minutes
-LAST_SEEN_GRANULARITY = 120 * 1000
+LAST_SEEN_GRANULARITY = 10 * 60 * 1000


 class ClientIpStore(background_updates.BackgroundUpdateStore):
@@ -613,6 +613,9 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
            self._rotate_notifs, 30 * 60 * 1000
        )

+       self._rotate_delay = 3
+       self._rotate_count = 10000
+
    def _set_push_actions_for_event_and_users_txn(self, txn, events_and_contexts,
                                                  all_events_and_contexts):
        """Handles moving push actions from staging table to main
@@ -809,7 +812,7 @@
                )
                if caught_up:
                    break
-               yield sleep(5)
+               yield sleep(self._rotate_delay)
        finally:
            self._doing_notif_rotation = False
@@ -830,8 +833,8 @@
        txn.execute("""
            SELECT stream_ordering FROM event_push_actions
            WHERE stream_ordering > ?
-           ORDER BY stream_ordering ASC LIMIT 1 OFFSET 50000
-       """, (old_rotate_stream_ordering,))
+           ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
+       """, (old_rotate_stream_ordering, self._rotate_count))
        stream_row = txn.fetchone()
        if stream_row:
            offset_stream_ordering, = stream_row
@@ -1013,7 +1013,6 @@ class EventsStore(EventsWorkerStore):
            "event_edge_hashes",
            "event_edges",
            "event_forward_extremities",
-           "event_push_actions",
            "event_reference_hashes",
            "event_search",
            "event_signatures",
@@ -1033,6 +1032,14 @@
            [(ev.event_id,) for ev, _ in events_and_contexts]
        )

+       for table in (
+           "event_push_actions",
+       ):
+           txn.executemany(
+               "DELETE FROM %s WHERE room_id = ? AND event_id = ?" % (table,),
+               [(ev.event_id,) for ev, _ in events_and_contexts]
+           )
+
    def _store_event_txn(self, txn, events_and_contexts):
        """Insert new events into the event and event_json tables
@@ -65,7 +65,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
        hosts = frozenset(get_domain_from_id(user_id) for user_id in user_ids)
        defer.returnValue(hosts)

-   @cached(max_entries=100000, iterable=True)
+   @cachedInlineCallbacks(max_entries=100000, iterable=True)
    def get_users_in_room(self, room_id):
        def f(txn):
            sql = (
@@ -79,7 +79,14 @@

            txn.execute(sql, (room_id, Membership.JOIN,))
            return [to_ascii(r[0]) for r in txn]
-       return self.runInteraction("get_users_in_room", f)
+       start_time = self._clock.time_msec()
+       result = yield self.runInteraction("get_users_in_room", f)
+       end_time = self._clock.time_msec()
+       logger.info(
+           "Fetched room membership for %s (%i users) in %i ms",
+           room_id, len(result), end_time - start_time,
+       )
+       defer.returnValue(result)

    @cached()
    def get_invited_rooms_for_user(self, user_id):
@@ -453,7 +460,7 @@

        defer.returnValue(joined_hosts)

-   @cached(max_entries=10000, iterable=True)
+   @cached(max_entries=10000)
    def _get_joined_hosts_cache(self, room_id):
        return _JoinedHostsCache(self, room_id)
@@ -721,7 +721,7 @@ def _parse_query(database_engine, search_term):
    results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)

    if isinstance(database_engine, PostgresEngine):
-       return " & ".join(result + ":*" for result in results)
+       return " & ".join(result for result in results)
    elif isinstance(database_engine, Sqlite3Engine):
        return " & ".join(result + "*" for result in results)
    else:
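The effect of that one-line change on Postgres is to drop prefix matching from the generated tsquery. A small standalone sketch of the before/after behaviour, re-using the regex from the hunk above (the function names here are just for illustration):

```python
import re


def parse_query_old(search_term):
    # Upstream behaviour: each word becomes a prefix-match lexeme.
    results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
    return " & ".join(result + ":*" for result in results)


def parse_query_new(search_term):
    # Hotfixed behaviour: exact lexeme match only.
    results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
    return " & ".join(result for result in results)


print(parse_query_old("hello matrix"))  # hello:* & matrix:*
print(parse_query_new("hello matrix"))  # hello & matrix
```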
@@ -20,7 +20,7 @@ from twisted.internet import defer

 from synapse.storage.background_updates import BackgroundUpdateStore
 from synapse.storage.engines import PostgresEngine
-from synapse.util.caches import intern_string, CACHE_SIZE_FACTOR
+from synapse.util.caches import intern_string, get_cache_factor_for
 from synapse.util.caches.descriptors import cached, cachedList
 from synapse.util.caches.dictionary_cache import DictionaryCache
 from synapse.util.stringutils import to_ascii
@@ -54,7 +54,7 @@ class StateGroupWorkerStore(SQLBaseStore):
        super(StateGroupWorkerStore, self).__init__(db_conn, hs)

        self._state_group_cache = DictionaryCache(
-           "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR
+           "*stateGroupCache*", 500000 * get_cache_factor_for("stateGroupCache")
        )

    @cached(max_entries=100000, iterable=True)
@@ -566,8 +566,7 @@
            state_dict = results[group]

            state_dict.update(
-               ((intern_string(k[0]), intern_string(k[1])), to_ascii(v))
-               for k, v in group_state_dict.iteritems()
+               group_state_dict.iteritems()
            )

            self._state_group_cache.update(
@@ -18,6 +18,16 @@ import os

 CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5))

+
+def get_cache_factor_for(cache_name):
+    env_var = "SYNAPSE_CACHE_FACTOR_" + cache_name.upper()
+    factor = os.environ.get(env_var)
+    if factor:
+        return float(factor)
+
+    return CACHE_SIZE_FACTOR
+
+
 metrics = synapse.metrics.get_metrics_for("synapse.util.caches")

 caches_by_name = {}
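Together with the synctl change above, this lets individual caches be scaled independently of the global `SYNAPSE_CACHE_FACTOR`. A quick sketch of the lookup behaviour, restating the function from the hunk (the cache names and factor values below are made up):

```python
import os

os.environ["SYNAPSE_CACHE_FACTOR"] = "2.0"
os.environ["SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE"] = "5.0"

CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5))


def get_cache_factor_for(cache_name):
    # Per-cache override, falling back to the global factor.
    factor = os.environ.get("SYNAPSE_CACHE_FACTOR_" + cache_name.upper())
    if factor:
        return float(factor)
    return CACHE_SIZE_FACTOR


print(get_cache_factor_for("stateGroupCache"))    # 5.0 (per-cache override)
print(get_cache_factor_for("get_users_in_room"))  # 2.0 (global SYNAPSE_CACHE_FACTOR)
```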
@@ -17,7 +17,7 @@ import logging

 from synapse.util.async import ObservableDeferred
 from synapse.util import unwrapFirstError, logcontext
-from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.caches import get_cache_factor_for
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
 from synapse.util.stringutils import to_ascii
@@ -310,7 +310,7 @@ class CacheDescriptor(_CacheDescriptorBase):
            orig, num_args=num_args, inlineCallbacks=inlineCallbacks,
            cache_context=cache_context)

-       max_entries = int(max_entries * CACHE_SIZE_FACTOR)
+       max_entries = int(max_entries * get_cache_factor_for(orig.__name__))

        self.max_entries = max_entries
        self.tree = tree
@@ -984,11 +984,13 @@ class RoomInitialSyncTestCase(RestTestCase):

        self.assertTrue("presence" in response)

-       presence_by_user = {
-           e["content"]["user_id"]: e for e in response["presence"]
-       }
-       self.assertTrue(self.user_id in presence_by_user)
-       self.assertEquals("m.presence", presence_by_user[self.user_id]["type"])
+       # presence is turned off on hotfixes
+
+       # presence_by_user = {
+       #     e["content"]["user_id"]: e for e in response["presence"]
+       # }
+       # self.assertTrue(self.user_id in presence_by_user)
+       # self.assertEquals("m.presence", presence_by_user[self.user_id]["type"])


class RoomMessageListTestCase(RestTestCase):