mirror of https://github.com/element-hq/synapse.git (synced 2025-12-05 01:10:13 +00:00)
Move towards a dedicated Duration class (#19223)
We have various constants to try to avoid mistyping durations, e.g. `ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND`, but this gets verbose and doesn't help with type checking. Instead, let's move towards a dedicated `Duration` class (essentially a [`timedelta`](https://docs.python.org/3/library/datetime.html#timedelta-objects) with helper methods). This PR introduces the new type and converts all usages of the existing constants to it. Future PRs may move the clock methods to use it as well (e.g. `call_later` and `looping_call`). Reviewable commit-by-commit.
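To make the shape of the conversion concrete, here is a minimal before/after sketch based on the hunks below (the `Clock` APIs still take raw numbers, so call sites convert explicitly via `.as_millis()` / `.as_secs()`):

```python
from synapse.util.duration import Duration

# Before: unit bookkeeping by hand, with integer constants:
#     PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS
#     clock.looping_call(f, PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND, ...)

# After: a typed duration that carries its own unit conversions.
PHONE_HOME_INTERVAL = Duration(hours=3)

PHONE_HOME_INTERVAL.as_millis()  # 10800000, for millisecond-based APIs
PHONE_HOME_INTERVAL.as_secs()    # 10800, for second-based APIs
```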
1 changelog.d/19223.misc Normal file
@@ -0,0 +1 @@
+Move towards using a dedicated `Duration` type.
@@ -30,24 +30,20 @@ from twisted.internet import defer
 from synapse.metrics import SERVER_NAME_LABEL
 from synapse.types import JsonDict
-from synapse.util.constants import (
-    MILLISECONDS_PER_SECOND,
-    ONE_HOUR_SECONDS,
-    ONE_MINUTE_SECONDS,
-)
+from synapse.util.duration import Duration
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
 logger = logging.getLogger("synapse.app.homeserver")
 
-INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
+INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME = Duration(minutes=5)
 """
 We wait 5 minutes to send the first set of stats as the server can be quite busy the
 first few minutes
 """
 
-PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS
+PHONE_HOME_INTERVAL = Duration(hours=3)
 """
 Phone home stats are sent every 3 hours
 """
@@ -222,13 +218,13 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
     # table will decrease
     clock.looping_call(
         hs.get_datastores().main.generate_user_daily_visits,
-        5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND,
+        Duration(minutes=5).as_millis(),
     )
 
     # monthly active user limiting functionality
     clock.looping_call(
        hs.get_datastores().main.reap_monthly_active_users,
-        ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND,
+        Duration(hours=1).as_millis(),
     )
     hs.get_datastores().main.reap_monthly_active_users()
@@ -274,7 +270,7 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
         logger.info("Scheduling stats reporting for 3 hour intervals")
         clock.looping_call(
             phone_stats_home,
-            PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND,
+            PHONE_HOME_INTERVAL.as_millis(),
             hs,
             stats,
         )
@@ -289,7 +285,7 @@ def start_phone_stats_home(hs: "HomeServer") -> None:
         # We wait 5 minutes to send the first set of stats as the server can
         # be quite busy the first few minutes
         clock.call_later(
-            INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS,
+            INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME.as_secs(),
             phone_stats_home,
             hs,
             stats,
@@ -39,7 +39,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import StrippedStateEvent
 from synapse.events.utils import parse_stripped_state_event
 from synapse.logging.opentracing import start_active_span, trace
-from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS_MS
+from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS
 from synapse.storage.databases.main.state import (
     ROOM_UNKNOWN_SENTINEL,
     Sentinel as StateSentinel,
@@ -70,7 +70,7 @@ from synapse.types.handlers.sliding_sync import (
 )
 from synapse.types.state import StateFilter
 from synapse.util import MutableOverlayMapping
-from synapse.util.constants import MILLISECONDS_PER_SECOND, ONE_HOUR_SECONDS
+from synapse.util.duration import Duration
 from synapse.util.sentinel import Sentinel
 
 if TYPE_CHECKING:
@@ -85,7 +85,7 @@ logger = logging.getLogger(__name__)
 # tight loops with clients that request lots of data at once.
 #
 # c.f. `NUM_ROOMS_THRESHOLD`. These values are somewhat arbitrarily picked.
-MINIMUM_NOT_USED_AGE_EXPIRY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND
+MINIMUM_NOT_USED_AGE_EXPIRY = Duration(hours=1)
 
 # How many rooms with updates we allow before we consider the connection expired
 # due to too many rooms to send.
@@ -99,7 +99,7 @@ NUM_ROOMS_THRESHOLD = 100
 # connection even if it is actively being used (and we're just not updating the
 # DB frequently enough). We arbitrarily double the update interval to give some
 # wiggle room.
-assert 2 * UPDATE_INTERVAL_LAST_USED_TS_MS < MINIMUM_NOT_USED_AGE_EXPIRY_MS
+assert 2 * UPDATE_INTERVAL_LAST_USED_TS < MINIMUM_NOT_USED_AGE_EXPIRY
 
 # Helper definition for the types that we might return. We do this to avoid
 # copying data between types (which can be expensive for many rooms).
@@ -913,7 +913,7 @@ class SlidingSyncRoomLists:
            if (
                last_sync_ts is not None
                and (self._clock.time_msec() - last_sync_ts)
-                > MINIMUM_NOT_USED_AGE_EXPIRY_MS
+                > MINIMUM_NOT_USED_AGE_EXPIRY.as_millis()
            ):
                raise SlidingSyncUnknownPosition()
 
@@ -39,7 +39,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage.databases.main.lock import Lock, LockStore
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.clock import Clock
-from synapse.util.constants import ONE_MINUTE_SECONDS
+from synapse.util.duration import Duration
 
 if TYPE_CHECKING:
     from synapse.logging.opentracing import opentracing
@@ -276,7 +276,7 @@ class WaitingLock:
     def _get_next_retry_interval(self) -> float:
         next = self._retry_interval
         self._retry_interval = max(5, next * 2)
-        if self._retry_interval > 10 * ONE_MINUTE_SECONDS:  # >7 iterations
+        if self._retry_interval > Duration(minutes=10).as_secs():  # >7 iterations
             logger.warning(
                 "Lock timeout is getting excessive: %ss. There may be a deadlock.",
                 self._retry_interval,
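As an aside on the `# >7 iterations` comment: the retry interval doubles on each call with a floor of 5 seconds, so starting from a small initial value it takes more than 7 iterations to exceed the 10 minute threshold. A quick sketch of that growth (the starting interval is not shown in this hunk, so the initial value here is an assumption):

```python
# Hypothetical walk-through of the doubling in _get_next_retry_interval.
# Assume the interval starts below the 5-second floor imposed by max(5, ...).
interval = 0.1
for iteration in range(1, 10):
    interval = max(5, interval * 2)
    if interval > 600:  # 10 minutes, i.e. Duration(minutes=10).as_secs()
        print(f"warning would fire on iteration {iteration}")  # iteration 8
        break
```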
@@ -363,7 +363,7 @@ class WaitingMultiLock:
     def _get_next_retry_interval(self) -> float:
         next = self._retry_interval
         self._retry_interval = max(5, next * 2)
-        if self._retry_interval > 10 * ONE_MINUTE_SECONDS:  # >7 iterations
+        if self._retry_interval > Duration(minutes=10).as_secs():  # >7 iterations
             logger.warning(
                 "Lock timeout is getting excessive: %ss. There may be a deadlock.",
                 self._retry_interval,
@@ -34,13 +34,14 @@ from twisted.web.iweb import IRequest
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import JsonDict, Requester
 from synapse.util.async_helpers import ObservableDeferred
+from synapse.util.duration import Duration
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
 logger = logging.getLogger(__name__)
 
-CLEANUP_PERIOD_MS = 1000 * 60 * 30  # 30 mins
+CLEANUP_PERIOD = Duration(minutes=30)
 
 
 P = ParamSpec("P")
@@ -56,7 +57,7 @@ class HttpTransactionCache:
         ] = {}
         # Try to clean entries every 30 mins. This means entries will exist
         # for at *LEAST* 30 mins, and at *MOST* 60 mins.
-        self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)
+        self.clock.looping_call(self._cleanup, CLEANUP_PERIOD.as_millis())
 
     def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable:
         """A helper function which returns a transaction key that can be used
@@ -145,5 +146,5 @@ class HttpTransactionCache:
         now = self.clock.time_msec()
         for key in list(self.transactions):
             ts = self.transactions[key][1]
-            if now > (ts + CLEANUP_PERIOD_MS):  # after cleanup period
+            if now > (ts + CLEANUP_PERIOD.as_millis()):  # after cleanup period
                 del self.transactions[key]
@@ -48,9 +48,9 @@ from synapse.storage.database import (
 )
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
 from synapse.types import JsonDict, StrCollection
-from synapse.util import Duration
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.duration import Duration
 from synapse.util.iterutils import batch_iter
 from synapse.util.json import json_encoder
 from synapse.util.stringutils import parse_and_validate_server_name
@@ -62,10 +62,10 @@ logger = logging.getLogger(__name__)
 
 
 # How long to keep messages in the device federation inbox before deleting them.
-DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS = 7 * Duration.DAY_MS
+DEVICE_FEDERATION_INBOX_CLEANUP_DELAY = Duration(days=7)
 
 # How often to run the task to clean up old device_federation_inbox rows.
-DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS = 5 * Duration.MINUTE_MS
+DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL = Duration(minutes=5)
 
 # Update name for the device federation inbox received timestamp index.
 DEVICE_FEDERATION_INBOX_RECEIVED_INDEX_UPDATE = (
@@ -152,7 +152,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
         if hs.config.worker.run_background_tasks:
             self.clock.looping_call(
                 run_as_background_process,
-                DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS,
+                DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL.as_millis(),
                 "_delete_old_federation_inbox_rows",
                 self.server_name,
                 self._delete_old_federation_inbox_rows,
@@ -996,9 +996,10 @@ class DeviceInboxWorkerStore(SQLBaseStore):
 
         def _delete_old_federation_inbox_rows_txn(txn: LoggingTransaction) -> bool:
             # We delete at most 100 rows that are older than
-            # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
+            # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY
             delete_before_ts = (
-                self.clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS
+                self.clock.time_msec()
+                - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_millis()
             )
             sql = """
                 WITH to_delete AS (
@@ -37,12 +37,7 @@ from synapse.types.handlers.sliding_sync import (
     RoomSyncConfig,
 )
 from synapse.util.caches.descriptors import cached
-from synapse.util.constants import (
-    MILLISECONDS_PER_SECOND,
-    ONE_DAY_SECONDS,
-    ONE_HOUR_SECONDS,
-    ONE_MINUTE_SECONDS,
-)
+from synapse.util.duration import Duration
 from synapse.util.json import json_encoder
 
 if TYPE_CHECKING:
@@ -57,14 +52,14 @@ logger = logging.getLogger(__name__)
 # position. We don't want to update it on every use to avoid excessive
 # writes, but we want it to be reasonably up-to-date to help with
 # cleaning up old connection positions.
-UPDATE_INTERVAL_LAST_USED_TS_MS = 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND
+UPDATE_INTERVAL_LAST_USED_TS = Duration(minutes=5)
 
 # Time in milliseconds the connection hasn't been used before we consider it
 # expired and delete it.
-CONNECTION_EXPIRY_MS = 7 * ONE_DAY_SECONDS * MILLISECONDS_PER_SECOND
+CONNECTION_EXPIRY = Duration(days=7)
 
 # How often we run the background process to delete old sliding sync connections.
-CONNECTION_EXPIRY_FREQUENCY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND
+CONNECTION_EXPIRY_FREQUENCY = Duration(hours=1)
 
 
 class SlidingSyncStore(SQLBaseStore):
@@ -101,7 +96,7 @@ class SlidingSyncStore(SQLBaseStore):
         if self.hs.config.worker.run_background_tasks:
             self.clock.looping_call(
                 self.delete_old_sliding_sync_connections,
-                CONNECTION_EXPIRY_FREQUENCY_MS,
+                CONNECTION_EXPIRY_FREQUENCY.as_millis(),
             )
 
     async def get_latest_bump_stamp_for_room(
@@ -430,7 +425,10 @@ class SlidingSyncStore(SQLBaseStore):
         # Update the `last_used_ts` if it's due to be updated. We don't update
         # every time to avoid excessive writes.
         now = self.clock.time_msec()
-        if last_used_ts is None or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS_MS:
+        if (
+            last_used_ts is None
+            or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS.as_millis()
+        ):
             self.db_pool.simple_update_txn(
                 txn,
                 table="sliding_sync_connections",
@@ -532,7 +530,7 @@ class SlidingSyncStore(SQLBaseStore):
     @wrap_as_background_process("delete_old_sliding_sync_connections")
     async def delete_old_sliding_sync_connections(self) -> None:
         """Delete sliding sync connections that have not been used for a long time."""
-        cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY_MS
+        cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY.as_millis()
 
         def delete_old_sliding_sync_connections_txn(txn: LoggingTransaction) -> None:
             sql = """
@@ -41,15 +41,6 @@ if typing.TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class Duration:
-    """Helper class that holds constants for common time durations in
-    milliseconds."""
-
-    MINUTE_MS = 60 * 1000
-    HOUR_MS = 60 * MINUTE_MS
-    DAY_MS = 24 * HOUR_MS
-
-
 def unwrapFirstError(failure: Failure) -> Failure:
     # Deprecated: you probably just want to catch defer.FirstError and reraise
     # the subFailure's value, which will do a better job of preserving stacktraces.
@@ -27,7 +27,7 @@ from typing import (
 from twisted.internet import defer
 
 from synapse.util.async_helpers import DeferredEvent
-from synapse.util.constants import MILLISECONDS_PER_SECOND
+from synapse.util.duration import Duration
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -67,7 +67,7 @@ class BackgroundQueue(Generic[T]):
         self._hs = hs
         self._name = name
         self._callback = callback
-        self._timeout_ms = timeout_ms
+        self._timeout_ms = Duration(milliseconds=timeout_ms)
 
         # The queue of items to process.
         self._queue: collections.deque[T] = collections.deque()
@@ -125,7 +125,7 @@ class BackgroundQueue(Generic[T]):
             # just loop round, clear the event, recheck the queue, and then
             # wait here again.
             new_data = await self._wakeup_event.wait(
-                timeout_seconds=self._timeout_ms / MILLISECONDS_PER_SECOND
+                timeout_seconds=self._timeout_ms.as_secs()
             )
             if not new_data:
                 # Timed out waiting for new data, so exit the loop
@@ -1,23 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2025 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# <https://www.gnu.org/licenses/agpl-3.0.html>.
-#
-
-# Time-based constants.
-#
-# Laying these out incrementally, even if only some are required, helps with
-# readability and catching bugs.
-ONE_MINUTE_SECONDS = 60
-ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS
-ONE_DAY_SECONDS = 24 * ONE_HOUR_SECONDS
-
-MILLISECONDS_PER_SECOND = 1000
40 synapse/util/duration.py Normal file
@@ -0,0 +1,40 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 Element Creations Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+from datetime import timedelta
+
+# Constant so we don't keep creating new timedelta objects when calling
+# `.as_millis()`.
+_ONE_MILLISECOND = timedelta(milliseconds=1)
+
+
+class Duration(timedelta):
+    """A subclass of timedelta that adds a convenience method for getting
+    the duration in milliseconds.
+
+    Examples:
+
+    ```
+    duration = Duration(hours=2)
+    print(duration.as_millis())  # Outputs: 7200000
+    ```
+    """
+
+    def as_millis(self) -> int:
+        """Returns the duration in milliseconds."""
+        return int(self / _ONE_MILLISECOND)
+
+    def as_secs(self) -> int:
+        """Returns the duration in seconds."""
+        return int(self.total_seconds())
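A note on the accessors above: both truncate via `int()`, so sub-unit remainders are dropped, and because `Duration` subclasses `timedelta`, comparisons and arithmetic against plain timedeltas (as in the `assert 2 * UPDATE_INTERVAL_LAST_USED_TS < MINIMUM_NOT_USED_AGE_EXPIRY` hunk above) work unchanged. A quick illustrative sketch using only what this diff defines:

```python
from datetime import timedelta

from synapse.util.duration import Duration

assert Duration(minutes=5).as_millis() == 300_000
assert Duration(minutes=5).as_secs() == 300

# int() truncates, so fractional remainders are dropped.
assert Duration(milliseconds=1500).as_secs() == 1

# As a timedelta subclass, Duration compares cleanly with plain timedeltas.
assert 2 * Duration(minutes=5) < timedelta(hours=1)
```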
@@ -17,7 +17,7 @@ from unittest.mock import AsyncMock
 from twisted.internet.testing import MemoryReactor
 
 from synapse.app.phone_stats_home import (
-    PHONE_HOME_INTERVAL_SECONDS,
+    PHONE_HOME_INTERVAL,
     start_phone_stats_home,
 )
 from synapse.rest import admin, login, register, room
@@ -78,7 +78,7 @@ class PhoneHomeStatsTestCase(unittest.HomeserverTestCase):
 
     def _get_latest_phone_home_stats(self) -> JsonDict:
         # Wait for `phone_stats_home` to be called again + a healthy margin (50s).
-        self.reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50)
+        self.reactor.advance(2 * PHONE_HOME_INTERVAL.as_secs() + 50)
 
         # Extract the reported stats from our http client mock
         mock_calls = self.put_json_mock.call_args_list
@@ -24,7 +24,7 @@ from synapse.api.errors import Codes
 from synapse.handlers.sliding_sync import room_lists
 from synapse.rest.client import login, room, sync
 from synapse.server import HomeServer
-from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY_MS
+from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY
 from synapse.util.clock import Clock
 
 from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
@@ -407,7 +407,7 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
         we expire the connection and ask the client to do a full resync.
 
         Connections are only expired if they have not been used for a minimum
-        amount of time (MINIMUM_NOT_USED_AGE_EXPIRY_MS) to avoid expiring
+        amount of time (MINIMUM_NOT_USED_AGE_EXPIRY) to avoid expiring
         connections that are actively being used.
         """
@@ -455,7 +455,7 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
         self.helper.send(room_id, "msg", tok=user2_tok)
 
         # Advance the clock to ensure that the last_used_ts is old enough
-        self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY_MS / 1000)
+        self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY.as_secs())
 
         # This sync should now raise SlidingSyncUnknownPosition
         channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok)
@@ -490,14 +490,14 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
         _, from_token = self.do_sync(sync_body, tok=user1_tok)
 
         # We can keep syncing so long as the interval between requests is less
-        # than CONNECTION_EXPIRY_MS
+        # than CONNECTION_EXPIRY
         for _ in range(5):
-            self.reactor.advance(0.5 * CONNECTION_EXPIRY_MS / 1000)
+            self.reactor.advance(0.5 * CONNECTION_EXPIRY.as_secs())
 
             _, from_token = self.do_sync(sync_body, tok=user1_tok)
 
         # ... but if we wait too long, the connection expires
-        self.reactor.advance(1 + CONNECTION_EXPIRY_MS / 1000)
+        self.reactor.advance(1 + CONNECTION_EXPIRY.as_secs())
 
         # This sync should now raise SlidingSyncUnknownPosition
         channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok)
@@ -26,12 +26,9 @@ from unittest.mock import AsyncMock, Mock, call
 from twisted.internet import defer, reactor as _reactor
 
 from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
-from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache
+from synapse.rest.client.transactions import CLEANUP_PERIOD, HttpTransactionCache
 from synapse.types import ISynapseReactor, JsonDict
 from synapse.util.clock import Clock
-from synapse.util.constants import (
-    MILLISECONDS_PER_SECOND,
-)
 
 from tests import unittest
 from tests.server import get_clock
@@ -187,7 +184,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
         )
         # Advance time just under the cleanup period.
         # Should NOT have cleaned up yet
-        self.reactor.advance((CLEANUP_PERIOD_MS - 1) / MILLISECONDS_PER_SECOND)
+        self.reactor.advance(CLEANUP_PERIOD.as_secs() - 1)
 
         yield self.cache.fetch_or_execute_request(
             self.mock_request, self.mock_requester, cb, "an arg"
@@ -196,7 +193,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
         cb.assert_called_once_with("an arg")
 
         # Advance time just after the cleanup period.
-        self.reactor.advance(2 / MILLISECONDS_PER_SECOND)
+        self.reactor.advance(2)
 
         yield self.cache.fetch_or_execute_request(
             self.mock_request, self.mock_requester, cb, "an arg"
@@ -28,7 +28,7 @@ from synapse.rest import admin
 from synapse.rest.client import devices
 from synapse.server import HomeServer
 from synapse.storage.databases.main.deviceinbox import (
-    DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS,
+    DEVICE_FEDERATION_INBOX_CLEANUP_DELAY,
 )
 from synapse.util.clock import Clock
@@ -191,7 +191,7 @@ class DeviceInboxFederationInboxCleanupTestCase(HomeserverTestCase):
         self.db_pool = self.store.db_pool
 
         # Advance time to ensure we are past the cleanup delay
-        self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS * 2 / 1000)
+        self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs() * 2)
 
     def test_delete_old_federation_inbox_rows_skips_if_no_index(self) -> None:
         """Test that we don't delete rows if the index hasn't been created yet."""
@@ -245,7 +245,7 @@ class DeviceInboxFederationInboxCleanupTestCase(HomeserverTestCase):
             )
         )
 
-        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000)
+        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs())
 
         # Insert new messages
         for i in range(5):
@@ -293,7 +293,7 @@ class DeviceInboxFederationInboxCleanupTestCase(HomeserverTestCase):
         )
 
         # Advance time to ensure we are past the cleanup delay
-        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000)
+        self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs())
 
         # Run the cleanup - it should delete in batches and sleep between them
         deferred = defer.ensureDeferred(