Compare commits

...

2 Commits

Author SHA1 Message Date
Erik Johnston
2cd6995702 Newsfile 2024-04-05 15:45:35 +01:00
Erik Johnston
c243916546 Reduce DB usage when backpaginating
In certain cases going from a stream token to a topological token can be
expensive (as it requires scanning all rows in the room above the stream
ordering), so we refactor things so that we do that calculation less often.
2024-04-05 15:43:32 +01:00
2024-04-05 15:43:32 +01:00
2 changed files with 22 additions and 10 deletions
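
The commit message above is the gist of the change: get_current_topological_token
is only worth calling when nothing cheaper is available. The following is a
minimal, illustrative Python sketch of that preference order (the token's own
topological part first, then the depth of an already-fetched event, then the
database lookup). The RoomToken class, the resolve_topological helper, and the
store argument are assumptions made for this example, not Synapse's actual
definitions; only get_current_topological_token is taken from the diff below.

    # Sketch only: prefer information we already have, and fall back to the
    # (expensive) database lookup as a last resort.
    from dataclasses import dataclass
    from typing import Any, Optional, Sequence


    @dataclass
    class RoomToken:
        # A pagination token may carry a topological position, or only a
        # stream position (names mirror the diff; the class is hypothetical).
        topological: Optional[int]
        stream: int


    async def resolve_topological(
        token: RoomToken,
        events: Sequence[Any],  # events already loaded for this request, if any
        store: Any,             # stand-in for the datastore
        room_id: str,
    ) -> int:
        if token.topological is not None:
            # Cheap: the token already carries a topological position.
            return token.topological
        if events:
            # Cheap: reuse the depth of an event we have already fetched
            # instead of issuing another query.
            return events[0].depth
        # Expensive fallback: resolve the stream position via the database.
        return await store.get_current_topological_token(room_id, token.stream)
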

changelog.d/17055.misc Normal file

@@ -0,0 +1 @@
Reduce database usage when backpaginating.


@@ -468,16 +468,6 @@ class PaginationHandler:
                     room_id, requester, allow_departed_users=True
                 )
 
-            if pagin_config.direction == Direction.BACKWARDS:
-                # if we're going backwards, we might need to backfill. This
-                # requires that we have a topo token.
-                if room_token.topological:
-                    curr_topo = room_token.topological
-                else:
-                    curr_topo = await self.store.get_current_topological_token(
-                        room_id, room_token.stream
-                    )
-
             # If they have left the room then clamp the token to be before
             # they left the room, to save the effort of loading from the
             # database.
@@ -496,6 +486,14 @@
                 )
                 assert leave_token.topological is not None
 
+                # We need the topological part of the token to compare against.
+                if room_token.topological:
+                    curr_topo = room_token.topological
+                else:
+                    curr_topo = await self.store.get_current_topological_token(
+                        room_id, room_token.stream
+                    )
+
                 if leave_token.topological < curr_topo:
                     from_token = from_token.copy_and_replace(
                         StreamKeyType.ROOM, leave_token
@@ -561,6 +559,19 @@
                     break
                 previous_event_depth = event_depth
 
+        # if we're going backwards, we might need to backfill. This
+        # requires that we have a topo token.
+        if room_token.topological:
+            curr_topo = room_token.topological
+        elif events:
+            # If we've already fetched some events then we can just use
+            # those to get the right depth.
+            curr_topo = events[0].depth
+        else:
+            curr_topo = await self.store.get_current_topological_token(
+                room_id, room_token.stream
+            )
+
         # Backfill in the foreground if we found a big gap, have too many holes,
         # or we don't have enough events to fill the limit that the client asked
         # for.
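
The trailing comment describes the decision that consumes curr_topo. As a
rough sketch of that decision only (the helper name, parameter names, and the
threshold value are assumptions for illustration, not Synapse's actual code),
it amounts to a three-way "or":

    # Hypothetical helper restating the comment above: backfill in the
    # foreground when a big gap was found, when there are too many holes, or
    # when fewer events were loaded than the limit the client asked for.
    def should_backfill_in_foreground(
        found_big_gap: bool,
        number_of_gaps: int,
        num_events_loaded: int,
        client_limit: int,
        too_many_gaps_threshold: int = 2,  # assumed value, for illustration only
    ) -> bool:
        missing_too_many_events = number_of_gaps > too_many_gaps_threshold
        not_enough_events = num_events_loaded < client_limit
        return found_big_gap or missing_too_many_events or not_enough_events
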