Merge branch 'master' into develop
commit 99cbd33630
10 CHANGES.md
@@ -1,3 +1,13 @@
+# Synapse 1.130.0 (2025-05-20)
+
+### Bugfixes
+
+- Fix startup being blocked on creating a new index that was introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439))
+- Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). ([\#18447](https://github.com/element-hq/synapse/issues/18447))
+
+
+
+
 # Synapse 1.130.0rc1 (2025-05-13)
 
 ### Features
6 debian/changelog vendored
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.130.0) stable; urgency=medium
+
+  * New Synapse release 1.130.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 20 May 2025 08:34:13 -0600
+
 matrix-synapse-py3 (1.130.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.130.0rc1.
pyproject.toml
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.130.0rc1"
+version = "1.130.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
synapse/storage/databases/main/events_bg_updates.py
@@ -24,7 +24,12 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
 
 import attr
 
-from synapse.api.constants import EventContentFields, Membership, RelationTypes
+from synapse.api.constants import (
+    MAX_DEPTH,
+    EventContentFields,
+    Membership,
+    RelationTypes,
+)
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import EventBase, make_event_from_dict
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -311,6 +316,10 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseStore):
             self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update,
         )
 
+        self.db_pool.updates.register_background_update_handler(
+            _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, self.fixup_max_depth_cap_bg_update
+        )
+
         # We want this to run on the main database at startup before we start processing
         # events.
         #
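For context: a handler registered with register_background_update_handler is invoked repeatedly by Synapse's background updater, each time with the progress JSON persisted for that update and a batch size, until the handler ends the update. A minimal standalone sketch of that contract (illustrative names only, not Synapse internals):

import asyncio
from typing import Any, Awaitable, Callable, Dict

JsonDict = Dict[str, Any]

async def drive_update(
    handler: Callable[[JsonDict, int], Awaitable[int]],
    progress: JsonDict,
    batch_size: int = 100,
) -> None:
    # Keep calling the handler until it reports that no work remains.
    while not progress.get("done", False):
        await handler(progress, batch_size)

async def example_handler(progress: JsonDict, batch_size: int) -> int:
    # Pretend there are 250 items to process in total.
    remaining = progress.get("remaining", 250)
    work = min(remaining, batch_size)
    progress["remaining"] = remaining - work
    progress["done"] = progress["remaining"] == 0
    return work  # items processed this round

asyncio.run(drive_update(example_handler, {}))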
@@ -2547,6 +2556,77 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseStore):
 
         return num_rows
 
+    async def fixup_max_depth_cap_bg_update(
+        self, progress: JsonDict, batch_size: int
+    ) -> int:
+        """Fixes the topological ordering for events that have a depth greater
+        than MAX_DEPTH. This should fix /messages ordering oddities."""
+
+        room_id_bound = progress.get("room_id", "")
+
+        def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> Tuple[bool, int]:
+            txn.execute(
+                """
+                SELECT room_id, room_version FROM rooms
+                WHERE room_id > ?
+                ORDER BY room_id
+                LIMIT ?
+                """,
+                (room_id_bound, batch_size),
+            )
+
+            # Find the next room ID to process, with a relevant room version.
+            room_ids: List[str] = []
+            max_room_id: Optional[str] = None
+            for room_id, room_version_str in txn:
+                max_room_id = room_id
+
+                # We only want to process rooms with a known room version that
+                # has strict canonical json validation enabled.
+                room_version = KNOWN_ROOM_VERSIONS.get(room_version_str)
+                if room_version and room_version.strict_canonicaljson:
+                    room_ids.append(room_id)
+
+            if max_room_id is None:
+                # The query did not return any rooms, so we are done.
+                return True, 0
+
+            # Update the progress to the last room ID we pulled from the DB,
+            # this ensures we always make progress.
+            self.db_pool.updates._background_update_progress_txn(
+                txn,
+                _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP,
+                progress={"room_id": max_room_id},
+            )
+
+            if not room_ids:
+                # There were no rooms in this batch that required the fix.
+                return False, 0
+
+            clause, list_args = make_in_list_sql_clause(
+                self.database_engine, "room_id", room_ids
+            )
+            sql = f"""
+                UPDATE events SET topological_ordering = ?
+                WHERE topological_ordering > ? AND {clause}
+            """
+            args = [MAX_DEPTH, MAX_DEPTH]
+            args.extend(list_args)
+            txn.execute(sql, args)
+
+            return False, len(room_ids)
+
+        done, num_rooms = await self.db_pool.runInteraction(
+            "redo_max_depth_bg_update", redo_max_depth_bg_update_txn
+        )
+
+        if done:
+            await self.db_pool.updates._end_background_update(
+                _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP
+            )
+
+        return num_rooms
+
+
 def _resolve_stale_data_in_sliding_sync_tables(
     txn: LoggingTransaction,
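The update above walks the rooms table in room_id order with a keyset cursor, persists the last room scanned (not the last room fixed) so it always advances even when a batch has no eligible rooms, and caps topological_ordering only for room versions with strict canonical-JSON validation. A standalone sqlite3 sketch of that batching pattern (the small MAX_DEPTH and the version set are stand-ins for demonstration, not Synapse's real values):

import sqlite3

MAX_DEPTH = 100  # small stand-in; Synapse's real MAX_DEPTH is far larger
STRICT_VERSIONS = {"6", "7", "10", "11"}  # illustrative subset only

def run_batch(conn: sqlite3.Connection, room_id_bound: str, batch_size: int):
    rows = conn.execute(
        "SELECT room_id, room_version FROM rooms WHERE room_id > ? "
        "ORDER BY room_id LIMIT ?",
        (room_id_bound, batch_size),
    ).fetchall()
    if not rows:
        return None, 0  # no rooms past the cursor: the update is finished

    eligible = [rid for rid, ver in rows if ver in STRICT_VERSIONS]
    if eligible:
        marks = ",".join("?" * len(eligible))
        conn.execute(
            f"UPDATE events SET topological_ordering = ? "
            f"WHERE topological_ordering > ? AND room_id IN ({marks})",
            [MAX_DEPTH, MAX_DEPTH, *eligible],
        )
    # The new cursor is the last room *scanned*, so progress is guaranteed
    # even for batches where no room required the fix.
    return rows[-1][0], len(eligible)

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE rooms (room_id TEXT, room_version TEXT)")
conn.execute("CREATE TABLE events (room_id TEXT, topological_ordering INTEGER)")
conn.executemany("INSERT INTO rooms VALUES (?, ?)", [("!a:x", "6"), ("!b:x", "1")])
conn.execute("INSERT INTO events VALUES ('!a:x', ?)", (MAX_DEPTH + 5,))
print(run_batch(conn, "", 10))  # -> ('!b:x', 1); the event is capped at 100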
synapse/storage/databases/main/sliding_sync.py
@@ -68,6 +68,14 @@ class SlidingSyncStore(SQLBaseStore):
             columns=("membership_event_id",),
         )
 
+        self.db_pool.updates.register_background_index_update(
+            update_name="sliding_sync_membership_snapshots_user_id_stream_ordering",
+            index_name="sliding_sync_membership_snapshots_user_id_stream_ordering",
+            table="sliding_sync_membership_snapshots",
+            columns=("user_id", "event_stream_ordering"),
+            replaces_index="sliding_sync_membership_snapshots_user_id",
+        )
+
     async def get_latest_bump_stamp_for_room(
         self,
         room_id: str,
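register_background_index_update with replaces_index builds the new index in the background and then drops the index it supersedes; moving this off the startup path is what fixes the blocked startup from #18439. A sqlite3 sketch of the net effect (Synapse additionally handles Postgres, where it can build the index CONCURRENTLY; that detail is not shown here):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE sliding_sync_membership_snapshots ("
    "user_id TEXT, event_stream_ordering INTEGER, membership_event_id TEXT)"
)
# The old single-column index that the background update replaces.
conn.execute(
    "CREATE INDEX sliding_sync_membership_snapshots_user_id "
    "ON sliding_sync_membership_snapshots (user_id)"
)
# Build the replacement first, then drop the index it supersedes.
conn.execute(
    "CREATE INDEX IF NOT EXISTS "
    "sliding_sync_membership_snapshots_user_id_stream_ordering "
    "ON sliding_sync_membership_snapshots (user_id, event_stream_ordering)"
)
conn.execute("DROP INDEX IF EXISTS sliding_sync_membership_snapshots_user_id")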
@@ -12,5 +12,5 @@
 -- <https://www.gnu.org/licenses/agpl-3.0.html>.
 
 -- So we can fetch all rooms for a given user sorted by stream order
-DROP INDEX IF EXISTS sliding_sync_membership_snapshots_user_id;
-CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id, event_stream_ordering);
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (9204, 'sliding_sync_membership_snapshots_user_id_stream_ordering', '{}');
@@ -0,0 +1,17 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Background update that fixes any events with a topological ordering above the
+-- MAX_DEPTH value.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (9205, 'fixup_max_depth_cap', '{}');
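Scheduling works by name: the updater reads the row inserted above, decodes progress_json into the initial progress dict, and matches update_name against the handler registered in Python. A minimal sketch of that dispatch (illustrative only; the real updater lives in synapse/storage/background_updates.py):

import asyncio
import json
from typing import Any, Awaitable, Callable, Dict

JsonDict = Dict[str, Any]
Handler = Callable[[JsonDict, int], Awaitable[int]]

handlers: Dict[str, Handler] = {}

def register_background_update_handler(update_name: str, handler: Handler) -> None:
    handlers[update_name] = handler

async def dispatch(row: Dict[str, str], batch_size: int = 100) -> int:
    progress = json.loads(row["progress_json"])  # '{}' -> {} on the first run
    return await handlers[row["update_name"]](progress, batch_size)

async def fixup_max_depth_cap(progress: JsonDict, batch_size: int) -> int:
    return 0  # placeholder body

register_background_update_handler("fixup_max_depth_cap", fixup_max_depth_cap)
print(asyncio.run(dispatch({"update_name": "fixup_max_depth_cap", "progress_json": "{}"})))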
@@ -52,3 +52,5 @@ class _BackgroundUpdates:
     MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE = (
         "mark_unreferenced_state_groups_for_deletion_bg_update"
    )
+
+    FIXUP_MAX_DEPTH_CAP = "fixup_max_depth_cap"
157 tests/storage/test_events_bg_updates.py Normal file
@@ -0,0 +1,157 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+#
+
+from typing import Dict
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import MAX_DEPTH
+from synapse.api.room_versions import RoomVersion, RoomVersions
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+
+class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase):
+    """Test the background update that caps topological_ordering at MAX_DEPTH."""
+
+    def prepare(
+        self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+    ) -> None:
+        self.store = self.hs.get_datastores().main
+        self.db_pool = self.store.db_pool
+
+        self.room_id = "!testroom:example.com"
+
+        # Reinsert the background update as it was already run at the start of
+        # the test.
+        self.get_success(
+            self.db_pool.simple_insert(
+                table="background_updates",
+                values={
+                    "update_name": "fixup_max_depth_cap",
+                    "progress_json": "{}",
+                },
+            )
+        )
+
+    def create_room(self, room_version: RoomVersion) -> Dict[str, int]:
+        """Create a room with a known room version and insert events.
+
+        Returns a map from each inserted event ID to its depth, including
+        the events that exceed MAX_DEPTH.
+        """
+
+        # Create a room with a specific room version
+        self.get_success(
+            self.db_pool.simple_insert(
+                table="rooms",
+                values={
+                    "room_id": self.room_id,
+                    "room_version": room_version.identifier,
+                },
+            )
+        )
+
+        # Insert events with some depths exceeding MAX_DEPTH
+        event_id_to_depth: Dict[str, int] = {}
+        for depth in range(MAX_DEPTH - 5, MAX_DEPTH + 5):
+            event_id = f"$event{depth}:example.com"
+            event_id_to_depth[event_id] = depth
+
+            self.get_success(
+                self.db_pool.simple_insert(
+                    table="events",
+                    values={
+                        "event_id": event_id,
+                        "room_id": self.room_id,
+                        "topological_ordering": depth,
+                        "depth": depth,
+                        "type": "m.test",
+                        "sender": "@user:test",
+                        "processed": True,
+                        "outlier": False,
+                    },
+                )
+            )
+
+        return event_id_to_depth
+
+    def test_fixup_max_depth_cap_bg_update(self) -> None:
+        """Test that the background update correctly caps topological_ordering
+        at MAX_DEPTH."""
+
+        event_id_to_depth = self.create_room(RoomVersions.V6)
+
+        # Run the background update
+        progress = {"room_id": ""}
+        batch_size = 10
+        num_rooms = self.get_success(
+            self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
+        )
+
+        # Verify the number of rooms processed
+        self.assertEqual(num_rooms, 1)
+
+        # Verify that the topological_ordering of events has been capped at
+        # MAX_DEPTH
+        rows = self.get_success(
+            self.db_pool.simple_select_list(
+                table="events",
+                keyvalues={"room_id": self.room_id},
+                retcols=["event_id", "topological_ordering"],
+            )
+        )
+
+        for event_id, topological_ordering in rows:
+            if event_id_to_depth[event_id] >= MAX_DEPTH:
+                # Events with a depth greater than or equal to MAX_DEPTH should
+                # be capped at MAX_DEPTH.
+                self.assertEqual(topological_ordering, MAX_DEPTH)
+            else:
+                # Events with a depth less than MAX_DEPTH should remain
+                # unchanged.
+                self.assertEqual(topological_ordering, event_id_to_depth[event_id])
+
+    def test_fixup_max_depth_cap_bg_update_old_room_version(self) -> None:
+        """Test that the background update does not cap topological_ordering for
+        rooms with old room versions."""
+
+        event_id_to_depth = self.create_room(RoomVersions.V5)
+
+        # Run the background update
+        progress = {"room_id": ""}
+        batch_size = 10
+        num_rooms = self.get_success(
+            self.store.fixup_max_depth_cap_bg_update(progress, batch_size)
+        )
+
+        # Verify the number of rooms processed
+        self.assertEqual(num_rooms, 0)
+
+        # Verify that the topological_ordering of events has not been
+        # changed from their depth.
+        rows = self.get_success(
+            self.db_pool.simple_select_list(
+                table="events",
+                keyvalues={"room_id": self.room_id},
+                retcols=["event_id", "topological_ordering"],
+            )
+        )
+
+        # Assert that the topological_ordering of events has not been changed
+        # from their depth.
+        self.assertDictEqual(event_id_to_depth, dict(rows))